pax_global_header00006660000000000000000000000064150542222300014505gustar00rootroot0000000000000052 comment=af2bb99a98d076da99e14c036bcfdd12069f2bb5 tiup-1.16.3/000077500000000000000000000000001505422223000125565ustar00rootroot00000000000000tiup-1.16.3/.codecov.yml000066400000000000000000000004721505422223000150040ustar00rootroot00000000000000codecov: require_ci_to_pass: no notify: wait_for_ci: no coverage: round: nearest range: 30..70 ignore: - "pkg/cluster/api/dmpb" - "pkg/cluster/api/typeutil" flags: tiup: carryforward: true cluster: carryforward: true dm: carryforward: true playground: carryforward: true tiup-1.16.3/.github/000077500000000000000000000000001505422223000141165ustar00rootroot00000000000000tiup-1.16.3/.github/ISSUE_TEMPLATE/000077500000000000000000000000001505422223000163015ustar00rootroot00000000000000tiup-1.16.3/.github/ISSUE_TEMPLATE/bug-report.md000066400000000000000000000006121505422223000207100ustar00rootroot00000000000000--- name: "\U0001F41B Bug Report" about: Something isn't working as expected labels: type/bug --- ## Bug Report Please answer these questions before submitting your issue. Thanks! 1. What did you do? 2. What did you expect to see? 3. What did you see instead? 4. What version of TiUP are you using (`tiup --version`)? 
tiup-1.16.3/.github/ISSUE_TEMPLATE/challenge-program.md000066400000000000000000000007761505422223000222240ustar00rootroot00000000000000--- name: "\U0001F947 Challenge Program" about: Challenge Program issues labels: challenge-program-2 --- ## Description ## Score * score number ## Mentor(s) * [@xxxx](github url) Contact the mentors: **#tidb-challenge-program** channel in [TiDB Community](https://join.slack.com/t/tidbcommunity/shared_invite/enQtNzc0MzI4ODExMDc4LWYwYmIzMjZkYzJiNDUxMmZlN2FiMGJkZjAyMzQ5NGU0NGY0NzI3NTYwMjAyNGQ1N2I2ZjAxNzc1OGUwYWM0NzE) Slack Workspace ## Recommended Skills * skills 1 * skills 1 ## Learning Materials tiup-1.16.3/.github/ISSUE_TEMPLATE/feature-request.md000066400000000000000000000014441505422223000217470ustar00rootroot00000000000000--- name: "\U0001F680 Feature Request" about: I have a suggestion labels: type/feature-request --- ## Feature Request **Is your feature request related to a problem? Please describe:** **Describe the feature you'd like:** **Why the featue is needed:** **Describe alternatives you've considered:** **Teachability, Documentation, Adoption, Migration Strategy:** tiup-1.16.3/.github/ISSUE_TEMPLATE/general-question.md000066400000000000000000000007731505422223000221140ustar00rootroot00000000000000--- name: "\U0001F914 General Question" about: Usage question that isn't answered in docs or discussion labels: type/question --- ## General Question Before asking a question, make sure you have: - Searched existing Stack Overflow questions. - Googled your question. - Searched open and closed [GitHub issues](https://github.com/pingcap/tiup/issues?utf8=%E2%9C%93&q=is%3Aissue) - Read the documentation: - [TiUP Readme](https://github.com/pingcap/tiup) - [TiUP Doc](https://github.com/pingcap/docs) tiup-1.16.3/.github/pull_request_template.md000066400000000000000000000026121505422223000210600ustar00rootroot00000000000000 ### What problem does this PR solve? close #xxx ### What is changed and how it works? 
### Check List Tests - [ ] Unit test - [ ] Integration test - [ ] Manual test (add detailed scripts or steps below) - [ ] No code Code changes - [ ] Has exported function/method change - [ ] Has exported variable/fields change - [ ] Has interface methods change - [ ] Has persistent data change Side effects - [ ] Possible performance regression - [ ] Increased code complexity - [ ] Breaking backward compatibility Related changes - [ ] Need to cherry-pick to the release branch - [ ] Need to update the documentation Release notes: ```release-note NONE ``` tiup-1.16.3/.github/workflows/000077500000000000000000000000001505422223000161535ustar00rootroot00000000000000tiup-1.16.3/.github/workflows/install.yaml000066400000000000000000000042311505422223000205050ustar00rootroot00000000000000name: install on: pull_request: branches: - master - release-* paths: - 'install.sh' push: branches: - release-* paths: - 'install.sh' jobs: install: name: Install runs-on: ubuntu-22.04 env: working-directory: ${{ github.workspace }}/go/src/github.com/${{ github.repository }} steps: - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} path: go/src/github.com/${{ github.repository }} - name: Install And Check working-directory: ${{ env.working-directory }} run: | sh install.sh source ~/.profile which tiup || (echo "no tiup found" && exit 1) ! 
tiup update --self | grep -i "WARN: adding root certificate" tiup --version local_install: name: Local Install runs-on: ubuntu-latest env: working-directory: ${{ github.workspace }}/go/src/github.com/${{ github.repository }} steps: - name: Set up Go 1.21 uses: actions/setup-go@v4 with: go-version: 1.21.x id: go - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} path: go/src/github.com/${{ github.repository }} - name: Build TiUP working-directory: ${{ env.working-directory }} run: make tiup - name: Setup TiUP run: | mkdir -p ~/.tiup/bin curl https://tiup-mirrors.pingcap.com/root.json -o ~/.tiup/bin/root.json - name: Clone Mirror working-directory: ${{ env.working-directory }} run: ./bin/tiup mirror clone test-mirror - name: Local Install And Check working-directory: ${{ env.working-directory }}/test-mirror run: | sh local_install.sh source ~/.profile which tiup || (echo "no tiup found" && exit 1) tiup --version cp ../bin/tiup $(which tiup) ! 
tiup list | grep -i "WARN: adding root certificate" tiup --version tiup-1.16.3/.github/workflows/integrate-cluster-cmd.yaml000066400000000000000000000075621505422223000232530ustar00rootroot00000000000000--- name: integrate-cluster-cmd on: schedule: # times are in UTC - cron: '19 21 * * *' pull_request: branches: - master - release-* paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' - 'components/client/**' - 'components/ctl/**' - 'components/dm/**' - 'components/doc/**' - 'components/errdoc/**' - 'components/playground/**' - 'server/**' - 'pkg/version/version.go' - '.github/workflows/integrate-tiup**' - '.github/workflows/integrate-dm**' - '.github/workflows/integrate-playground**' jobs: cluster: runs-on: ubuntu-latest strategy: fail-fast: true matrix: cases: - 'test_cmd' - 'test_cmd_tls_native_ssh' - 'test_upgrade' - 'test_upgrade_tls' - 'test_tikv_cdc' steps: - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - name: Set up Go 1.24 uses: actions/setup-go@v4 with: go-version: 1.24.x id: go - name: Build build_integration_test run: | export PATH=$PATH:$GOPATH/bin sudo apt-get update sudo apt-get install -y build-essential python-is-python3 python3-pip ca-certificates curl sudo install -m 0755 -d /etc/apt/keyrings sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc sudo chmod a+r /etc/apt/keyrings/docker.asc echo \ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ $(. 
/etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null sudo apt-get update sudo apt-get install -y docker-compose-plugin make build_integration_test tiup - name: Build the docker-compose stack run: TIUP_CLUSTER_ROOT=$(pwd) ./docker/up.sh --daemon - name: Check running containers run: | docker ps df -h free -h - name: Run test suite id: test run: | # ensuere docker ssh is ready sleep 3 # should not use -it # ref: https://stackoverflow.com/questions/43099116/error-the-input-device-is-not-a-tty docker exec tiup-cluster-control bash /tiup-cluster/tests/tiup-cluster/run.sh ${{ matrix.cases }} - name: Collect component log if: ${{ failure() }} # if: always() run: | docker exec tiup-cluster-control bash /tiup-cluster/tests/tiup-cluster/script/pull_log.sh /tiup-cluster/logs - name: Detect error log if: ${{ failure() }} # if: always() run: | bash ./tests/tiup-cluster/script/detect_error.sh ./logs/ - name: Upload component log if: ${{ failure() }} # if: always() uses: actions/upload-artifact@v4 with: overwrite: true name: component_logs path: ./logs - name: Output cluster debug log if: ${{ failure() }} # if: always() run: | pwd docker ps df -h free -h find ./logs -type f -exec sh -c 'echo "{}" && cat {} && echo "\n"' \; || true - name: Upload coverage to Codecov run: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov ./codecov -F cluster -s ./tests/tiup-cluster/cover -f '*.out' # - name: Setup tmate session # uses: mxschmitt/action-tmate@v3 # if: ${{ failure() }} # with: # limit-access-to-actor: true tiup-1.16.3/.github/workflows/integrate-cluster-scale.yaml000066400000000000000000000074521505422223000235750ustar00rootroot00000000000000--- name: integrate-cluster-scale on: schedule: # times are in UTC - cron: '19 21 * * *' pull_request: branches: - master - release-* paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' - 
'components/client/**' - 'components/ctl/**' - 'components/dm/**' - 'components/doc/**' - 'components/errdoc/**' - 'components/playground/**' - 'server/**' - 'pkg/version/version.go' - '.github/workflows/integrate-tiup**' - '.github/workflows/integrate-dm**' - '.github/workflows/integrate-playground**' jobs: cluster: runs-on: ubuntu-latest strategy: fail-fast: true matrix: cases: - 'test_scale_core' - 'test_scale_tools' - 'test_scale_core_tls' - 'test_scale_tools_tls' - 'test_scale_tiproxy' steps: - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - name: Set up Go 1.24 uses: actions/setup-go@v4 with: go-version: 1.24.x id: go - name: Build build_integration_test run: | export PATH=$PATH:$GOPATH/bin sudo apt-get update sudo apt-get install -y build-essential python-is-python3 python3-pip ca-certificates curl sudo install -m 0755 -d /etc/apt/keyrings sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc sudo chmod a+r /etc/apt/keyrings/docker.asc echo \ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null sudo apt-get update sudo apt-get install -y docker-compose-plugin make build_integration_test - name: Build the docker-compose stack # with --dev the first run will fail for unknow reason, just retry it and will success now.. 
run: TIUP_CLUSTER_ROOT=$(pwd) ./docker/up.sh --daemon - name: Check running containers run: | docker ps df -h free -h - name: Run test suite id: test run: | # ensuere docker ssh is ready sleep 5 # should not use -it # ref: https://stackoverflow.com/questions/43099116/error-the-input-device-is-not-a-tty docker exec tiup-cluster-control bash /tiup-cluster/tests/tiup-cluster/run.sh ${{ matrix.cases }} - name: Collect component log if: ${{ failure() }} # if: always() run: | docker exec tiup-cluster-control bash /tiup-cluster/tests/tiup-cluster/script/pull_log.sh /tiup-cluster/logs - name: Detect error log if: ${{ failure() }} run: | bash ./tests/tiup-cluster/script/detect_error.sh ./logs/ - name: Upload component log if: ${{ failure() }} # if: always() uses: actions/upload-artifact@v4 with: overwrite: true name: cluster_logs path: ./logs - name: Output cluster debug log if: ${{ failure() }} # if: always() run: | pwd docker ps df -h free -h find ./logs -type f -exec sh -c 'echo "{}" && cat {} && echo "\n"' \; || true - name: Upload coverage to Codecov run: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov ./codecov -F cluster -s ./tests/tiup-cluster/cover -f '*.out' tiup-1.16.3/.github/workflows/integrate-dm.yaml000066400000000000000000000074241505422223000214260ustar00rootroot00000000000000--- name: integrate-dm on: schedule: # times are in UTC - cron: '19 21 * * *' pull_request: branches: - master - release-* paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' - 'components/client/**' - 'components/ctl/**' - 'components/cluster/**' - 'components/doc/**' - 'components/errdoc/**' - 'components/playground/**' - 'server/**' - 'pkg/version/version.go' - '.github/workflows/integrate-cluster**' - '.github/workflows/integrate-tiup**' - '.github/workflows/integrate-playground**' jobs: dm: runs-on: ubuntu-latest strategy: fail-fast: true matrix: cases: - '--do-cases test_cmd' - '--do-cases test_upgrade' 
- '--native-ssh --do-cases test_cmd' - '--native-ssh --do-cases test_upgrade' steps: - name: Set up Go 1.24 uses: actions/setup-go@v4 with: go-version: 1.24.x id: go - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - name: Build build_integration_test run: | export PATH=$PATH:$GOPATH/bin sudo apt-get update sudo apt-get install -y build-essential python-is-python3 python3-pip ca-certificates curl sudo install -m 0755 -d /etc/apt/keyrings sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc sudo chmod a+r /etc/apt/keyrings/docker.asc echo \ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null sudo apt-get update sudo apt-get install -y docker-compose-plugin make build_integration_test make tiup - name: Build the docker-compose stack # with --dev the first run will fail for unknow reason, just retry it and will success now.. 
run: | TIUP_CLUSTER_ROOT=$(pwd) ./docker/up.sh --daemon --compose ./docker-compose.dm.yml - name: Check running containers run: | docker ps df -h free -h - name: Run test suite id: test run: | # ensuere docker ssh is ready sleep 4 # should not use -it # ref: https://stackoverflow.com/questions/43099116/error-the-input-device-is-not-a-tty docker exec tiup-cluster-control bash /tiup-cluster/tests/tiup-dm/run.sh ${{ matrix.cases }} - name: Collect component log if: ${{ failure() }} # if: ${{ failure() }} run: | docker exec tiup-cluster-control bash -c 'mkdir -p /tiup-cluster/logs; [[ -d ~/.tiup/logs ]] && find ~/.tiup/logs -type f -name "*.log" -exec cp {} /tiup-cluster/logs \; || true' - name: Upload component log if: ${{ failure() }} # if: always() uses: actions/upload-artifact@v4 with: overwrite: true name: dm_logs path: ./logs - name: Output dm debug log if: ${{ failure() }} # if: always() run: | pwd docker ps df -h free -h find ./logs -type f -exec sh -c 'echo "{}" && cat {} && echo "\n"' \; || true - name: Upload coverage to Codecov run: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov ./codecov -F dm -s ./tests/tiup-dm/cover -f '*.out' tiup-1.16.3/.github/workflows/integrate-playground.yaml000066400000000000000000000047521505422223000232130ustar00rootroot00000000000000--- name: integrate-playground on: schedule: # times are in UTC - cron: '19 21 * * *' pull_request: branches: - master - release-* - feature/* paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' - 'embed/templates/examples/**' - 'components/client/**' - 'components/ctl/**' - 'components/cluster/**' - 'components/doc/**' - 'components/errdoc/**' - 'components/dm/**' - 'server/**' - 'pkg/version/version.go' - '.github/workflows/integrate-cluster**' - '.github/workflows/integrate-dm**' - '.github/workflows/integrate-tiup**' jobs: playground: runs-on: ubuntu-latest timeout-minutes: 10 strategy: fail-fast: true matrix: cases: 
- "test_playground" steps: - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - name: Set up Go 1.24 uses: actions/setup-go@v4 with: go-version: 1.24.x id: go - name: Build build_tiup_playground_test run: | export PATH=$PATH:$GOPATH/bin sudo apt update sudo apt-get install -y build-essential psmisc make tiup build_tiup_playground_test - name: Run test suite id: test run: | export PATH=$PATH:$PWD/bin/ bash ./tests/tiup-playground/${{ matrix.cases }}.sh - name: Collect component log if: ${{ failure() }} run: | # ignore the rocksdb find ./tests/tiup-playground/_tmp/home/data -type f -name "*.log" | grep -vE '/data/(raft|db|region-meta)/' | xargs cp {} ./logs - name: Upload component log if: ${{ failure() }} uses: actions/upload-artifact@v4 with: overwrite: true name: playground_logs path: ./logs - name: Output playground debug log if: ${{ failure() }} # if: always() run: | for f in $(find ./tests/tiup-playground/_tmp/home/data -type f -name "*.log" | grep -vE '/data/(raft|db|region-meta)/'); do echo "${f}" && cat "${f}"; done - name: Upload coverage to Codecov run: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov ./codecov -F playground -s ./tests/tiup-playground/cover -f '*.out' tiup-1.16.3/.github/workflows/integrate-tiup.yaml000066400000000000000000000046531505422223000220100ustar00rootroot00000000000000--- name: integrate-tiup on: schedule: # times are in UTC - cron: '19 21 * * *' pull_request: branches: - master - release-* - feature/* paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' - '.github/workflows/integrate-cluster**' - '.github/workflows/integrate-dm**' - '.github/workflows/integrate-playground**' push: branches: - master paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' - '.github/workflows/integrate-cluster**' - '.github/workflows/integrate-dm**' 
- '.github/workflows/integrate-playground**' jobs: tiup: timeout-minutes: 10 runs-on: ubuntu-latest strategy: fail-fast: true matrix: cases: - "test_tiup" steps: - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - name: Set up Go 1.24 uses: actions/setup-go@v4 with: go-version: 1.24.x id: go - name: Build build_tiup_test run: | export PATH=$PATH:$GOPATH/bin sudo apt update sudo apt-get install -y build-essential make build_tiup_test - name: Run test suite id: test run: | export PATH=$PATH:$PWD/bin echo $PATH bash ./tests/tiup/${{ matrix.cases }}.sh - name: Upload coverage to Codecov run: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov ./codecov -F tiup -s ./tests/tiup/cover -f '*.out' unit-test: runs-on: ubuntu-latest steps: - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} - name: Set up Go 1.24 uses: actions/setup-go@v4 with: go-version: 1.24.x id: go - name: make unit-test run: | export PATH=$PATH:$GOPATH/bin make check make test - name: Upload coverage to Codecov run: | curl -Os https://uploader.codecov.io/latest/linux/codecov chmod +x codecov ./codecov -F unittest -s cover -f '*.out' tiup-1.16.3/.github/workflows/lint.yaml000066400000000000000000000010161505422223000200030ustar00rootroot00000000000000name: lint on: pull_request: jobs: install: name: Lint runs-on: ubuntu-latest env: working-directory: ${{ github.workspace }}/go/src/github.com/${{ github.repository }} steps: - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.pull_request.head.sha }} path: go/src/github.com/${{ github.repository }} - name: Lint working-directory: ${{ env.working-directory }} run: make lint tiup-1.16.3/.github/workflows/release-tiup.yaml000066400000000000000000000235531505422223000214460ustar00rootroot00000000000000--- name: release-tiup 
on: push: branches: - ci/gh-* - release-* - master paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' release: types: - published workflow_dispatch: inputs: git-ref: description: git ref required: true staging-mirror: description: Publish to staging mirror required: false type: boolean jobs: release: runs-on: ubuntu-22.04 timeout-minutes: 30 outputs: REL_VER: ${{ steps.build_tiup.outputs.REL_VER }} strategy: fail-fast: true matrix: os: - "linux" - "darwin" arch: - "amd64" - "arm64" env: working-directory: ${{ github.workspace }}/go/src/github.com/${{ github.repository }} steps: - name: Set up Go 1.21 uses: actions/setup-go@v4 with: go-version: 1.21.x id: go - name: Check out code into the Go module directory uses: actions/checkout@v3 with: ref: ${{ github.event.inputs.git-ref || github.event.pull_request.head.sha }} path: go/src/github.com/${{ github.repository }} fetch-depth: 0 - name: Check if this is a staging build id: check_staging working-directory: ${{ env.working-directory }} if: github.event_name != 'release' run: | BRANCH_NAME=$(git rev-parse --abbrev-ref HEAD) if [ "$BRANCH_NAME" != "master" ]; then STAGING_VER=$(git describe --tags | sed 's/-[0-9]*-[^-]*$//')-$(echo $BRANCH_NAME | sed 's|/|.|g') else STAGING_VER=$(git describe --tags | sed 's/-[^-]*$//' | sed -r 's/(-[^-]*$)/-nightly\1/') fi echo ::set-output name=STAGING::$STAGING_VER - name: Get git ref and commit id: get_git working-directory: ${{ env.working-directory }} # when empty, it will be determined by Makefile run: | if [[ $GITHUB_REF == refs/tags/* ]]; then echo ::set-output name=GIT_REF::${GITHUB_REF/refs\/tags\//} elif [[ $GITHUB_REF == refs/heads/* ]]; then echo ::set-output name=GIT_REF::${GITHUB_REF/refs\/heads\//} fi COMMIT_TIMESTAMP=`git show --no-patch --no-notes --pretty='%aI'` echo ::set-output name=COMMIT_TIMESTAMP::$COMMIT_TIMESTAMP - name: Build for ${{ matrix.os }}-${{ matrix.arch }} id: build_tiup working-directory: ${{ 
env.working-directory }} env: GOOS: ${{ matrix.os }} GOARCH: ${{ matrix.arch }} COMMIT: ${GITHUB_SHA} GITREF: ${{ steps.get_git.outputs.GIT_REF }} actor: ${{ github.actor }} event: ${{ github.event_name }} run: | export GOPATH=${GITHUB_WORKSPACE}/go export PATH=$PATH:$GOPATH/bin if [ ! -z ${{ steps.check_staging.outputs.STAGING }} ]; then export EXTRA_LDFLAGS="-X \"github.com/${{ github.repository }}/pkg/version.TiUPVerName=${{ steps.check_staging.outputs.STAGING }}\"" export REL_VER=${{ steps.check_staging.outputs.STAGING }} else export REL_VER=`git describe --tags --always` fi echo ::set-output name=REL_VER::$REL_VER BUILD_FLAGS='-trimpath -mod=readonly -modcacherw -buildvcs=false' make build && \ BUILD_FLAGS='-trimpath -mod=readonly -modcacherw -buildvcs=false' make ctl && \ echo "Build success." checksum_file=checksum.${{ matrix.os }}-${{ matrix.arch }}.txt checksum_header="TiUP $REL_VER (${actor}@${event}) ${{ github.run_id }}" echo ${checksum_header} > ${checksum_file} echo "sha256sum:" >> ${checksum_file} sha256sum bin/* >> ${checksum_file} echo "b2sum:" >> ${checksum_file} b2sum bin/* >> ${checksum_file} cat ${checksum_file} - name: Package components id: packaging working-directory: ${{ env.working-directory }} env: ARCH: ${{ matrix.os }}-${{ matrix.arch }} REL_VER: ${{ steps.build_tiup.outputs.REL_VER }} run: | TMP_DIR=`mktemp -d` export TIUP_HOME="$TMP_DIR/home" echo ::set-output name=TIUP_HOME::$TIUP_HOME echo ::set-output name=TIUP_BIN::$TIUP_HOME/bin/tiup rm -rf $TIUP_HOME && mkdir -p $TIUP_HOME/{bin,keys} wget -O $TMP_DIR/tiup-linux-amd64.tar.gz -q https://tiup-mirrors.pingcap.com/tiup-linux-amd64.tar.gz tar -zxf $TMP_DIR/tiup-linux-amd64.tar.gz -C $TIUP_HOME/bin && chmod 755 $TIUP_HOME/bin/tiup curl -s https://tiup-mirrors.pingcap.com/root.json -o $TIUP_HOME/bin/root.json mkdir -p package cp $TIUP_HOME/bin/root.json bin/root.json # make tar reproducible TAR="tar --sort=name --mtime=${{ steps.get_git.outputs.COMMIT_TIMESTAMP }} --owner=0 --group=0 
--numeric-owner" # package all binaries to upload to github release asset ${TAR} -czf tiup-${REL_VER}-${ARCH}.tar.gz bin checksum.${{ matrix.os }}-${{ matrix.arch }}.txt # package each component # tiup-ctl is destributed with other components, so not packaging / publishing here ${TAR} -C bin -czf package/tiup-${REL_VER}-${ARCH}.tar.gz tiup ${TAR} -C bin -czf package/cluster-${REL_VER}-${ARCH}.tar.gz tiup-cluster ${TAR} -C bin -czf package/dm-${REL_VER}-${ARCH}.tar.gz tiup-dm ${TAR} -C bin -czf package/playground-${REL_VER}-${ARCH}.tar.gz tiup-playground ${TAR} -C bin -czf package/client-${REL_VER}-${ARCH}.tar.gz tiup-client ${TAR} -C bin -czf package/server-${REL_VER}-${ARCH}.tar.gz tiup-server - name: Publish packages working-directory: ${{ env.working-directory }} env: TIUP_MIRRORS: ${{ github.event.inputs.staging-mirror == 'true' && secrets.TIUP_SERVER_STAGING || secrets.TIUP_SERVER_PROD }} TIUP_HOME: ${{ steps.packaging.outputs.TIUP_HOME }} TIUP_BIN: ${{ steps.packaging.outputs.TIUP_BIN }} REL_VER: ${{ steps.build_tiup.outputs.REL_VER }} ARCH: ${{ matrix.os }}-${{ matrix.arch }} tiup_desc: "TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system" cluster_desc: "Deploy a TiDB cluster for production" dm_desc: "Data Migration Platform manager" playground_desc: "Bootstrap a local TiDB cluster for fun" client_desc: "Client to connect playground" server_desc: "TiUP publish/cache server" run: | ${TIUP_BIN} mirror set ${TIUP_MIRRORS} echo ${{ secrets.TIUP_COMP_KEY_PINGCAP }} | base64 -d > $TIUP_HOME/keys/private.json # add a random delay before actually publishing, this can help reduce manifest conflict # when there are multiple jobs running in parallel sleep $[ ( $RANDOM % 10 ) ]s ${TIUP_BIN} mirror publish tiup ${REL_VER} package/tiup-${REL_VER}-${ARCH}.tar.gz tiup --arch ${{ matrix.arch }} --os ${{ matrix.os }} --desc="${tiup_desc}" ${TIUP_BIN} mirror publish cluster ${REL_VER} 
package/cluster-${REL_VER}-${ARCH}.tar.gz tiup-cluster --arch ${{ matrix.arch }} --os ${{ matrix.os }} --desc="${cluster_desc}" ${TIUP_BIN} mirror publish dm ${REL_VER} package/dm-${REL_VER}-${ARCH}.tar.gz tiup-dm --arch ${{ matrix.arch }} --os ${{ matrix.os }} --desc="${dm_desc}" --standalone ${TIUP_BIN} mirror publish playground ${REL_VER} package/playground-${REL_VER}-${ARCH}.tar.gz tiup-playground --arch ${{ matrix.arch }} --os ${{ matrix.os }} --desc="${playground_desc}" ${TIUP_BIN} mirror publish client ${REL_VER} package/client-${REL_VER}-${ARCH}.tar.gz tiup-client --arch ${{ matrix.arch }} --os ${{ matrix.os }} --desc="${client_desc}" ${TIUP_BIN} mirror publish server ${REL_VER} package/server-${REL_VER}-${ARCH}.tar.gz tiup-server --arch ${{ matrix.arch }} --os ${{ matrix.os }} --desc="${server_desc}" --hide --standalone - name: Upload the all-in-one tarball to GitHub release uses: ncipollo/release-action@v1 if: github.event_name == 'release' with: allowUpdates: true artifacts: ${{ env.working-directory }}/tiup-${{ steps.build_tiup.outputs.REL_VER }}-${{ matrix.os }}-${{ matrix.arch }}.tar.gz artifactContentType: application/gzip omitBodyDuringUpdate: true omitNameDuringUpdate: true prerelease: ${{ github.event.release.prerelease }} token: ${{ secrets.GITHUB_TOKEN }} brew-upgrade: runs-on: ubuntu-latest timeout-minutes: 5 needs: release steps: - name: Check out brew code uses: actions/checkout@v3 continue-on-error: true if: github.event_name == 'release' with: repository: pingcap/homebrew-brew persist-credentials: false ref: master path: ${{ github.workspace }}/homebrew-brew fetch-depth: 0 - name: Update and Check tiup version id: update_version working-directory: ${{ github.workspace }}/homebrew-brew continue-on-error: true if: github.event_name == 'release' run: | sed -i 's/version.*/version "${{ needs.release.outputs.REL_VER }}"/g' Formula/tiup.rb sed -i 's/tag:.*/tag: "${{ needs.release.outputs.REL_VER }}"/g' Formula/tiup.rb cat Formula/tiup.rb - name: 
Push new homebrew uses: actions-js/push@master continue-on-error: true if: github.event_name == 'release' with: github_token: ${{ secrets.GITHUB_TOKEN }} directory: ${{ github.workspace }}/homebrew-brew message: "tiup: ${{ needs.release.outputs.REL_VER }}" branch: master repository: pingcap/homebrew-brew tiup-1.16.3/.github/workflows/reprotest.yaml000066400000000000000000000031441505422223000210700ustar00rootroot00000000000000# Check for reproducible build name: reprotest # Controls when the action will run. Triggers the workflow on push or pull request # events but only for the master branch on: pull_request: branches: - master - release-* - feature/* paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' push: branches: - release-* paths-ignore: - '**.html' - '**.md' - 'CNAME' - 'LICENSE' - 'doc/**' - 'embed/templates/examples/**' jobs: reprotest: # The type of runner that the job will run on runs-on: ubuntu-22.04 strategy: matrix: go: - 1.21.x # Steps represent a sequence of tasks that will be executed as part of the job steps: - uses: actions/checkout@v3 - name: Set up Go ${{ matrix.go }} uses: actions/setup-go@v4 with: go-version: ${{ matrix.go }} - name: Install reprotest and prepare id: prepare_env run: | echo ::set-output name=GOROOT::$GOROOT echo ::set-output name=GOPATH::$GOPATH sudo apt-get update && sudo apt-get -qy install reprotest - name: Check for reproducible build run: | sudo reprotest \ "make clean && \ GOROOT=${{ steps.prepare_env.GOROOT }} \ GOPATH=${{ steps.prepare_env.GOPATH }} \ PATH=$GOROOT/bin:$PATH \ BUILD_FLAGS='-trimpath -buildmode=pie -mod=readonly -modcacherw -buildvcs=false' \ make build" \ bin tiup-1.16.3/.gitignore000066400000000000000000000007601505422223000145510ustar00rootroot00000000000000vendor/ bin/ *.tar *.gz *.xz # ---> Go # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib # Test binary, build with `go test -c` *.test # Output of the go coverage tool, 
specifically when used with LiteIDE /cover.* /cover/ # Output of the go coverage tool, specifically when used with LiteIDE *.out /.idea/ /.vscode/ /package/ /tests/_tmp/ /tests/tiup_home/ /tests/tiup_mirrors/*.sha1 /tests/tiup_mirrors/ /logs docker/secret/ *__failpoint_binding__.go *__failpoint_stash__ tiup-1.16.3/CHANGELOG.md000066400000000000000000002762501505422223000144030ustar00rootroot00000000000000TiUP Changelog ## [1.16.3] 2025-08-26 ### New Features - `tiup-cluster` now supports victoria metrics (#2520, #2530, #2532, @nolouch) - `tiup-cluster` could specify start/stop timeout for service (#2568, @xhebox) - `tiup-cluster` added `--restart-timeout` for intervals between rolling restart (#2513, @dveeden) - `tiup-playground` supports tikv-worker and --tikv.columnar in tidb-cse mode (#2536, @breezewish) ### Fixes - Fix `tiup-playground` build for 32-bit platforms (#2528, @dveeden) - tiup will now exclude pre-release version from version list (#2550, @xhebox) - `tiup-cluster` check will also verify monitoring ports (#2563, @xhebox) - `tiup-cluster` has better compatibility with SELinux (#2501, @dveeden) - Fix wrong description of `tiup-cluster --ssh` (#2380, @xhebox) - Fix a config typo of PD for `tiup-playground` (#2578, @rleungx) - Fix missing tiflash-proxy metrics for `tiup-playground` (#2587, @Lloyd-Pottiger) - `tiup-cluster` will check if PD has loaded all regions after restart for v8.5.2 and above (#2592, @xhebox) - `tiup-cluster` will clear leader_priority to prevent failure of leader eviction (#2593, @xhebox) ### Improvements - `tiup-playground` added `--perf` for high performance testing usage, for tiflash only (#2535, @breezewish) - `tiup-clusterr` added example for tiflash and tiflash-proxy config for minimal.yaml and multi-dc.yaml (#2551, @JaySon-Huang) - Lots of code cleanup work (#2525, #2518, #2547, #2549, #2540, #2552, #2560, #2557, #2559, #2555, #2556, #2561, #2562, #2564, #2542, #2553, #2566, #2569, #2571, #2576, #2579, #2582, #2577, #2570, 
#2558, @dveeden, @xhebox, @wuhuizuo, @Lloyd-Pottiger) ## [1.16.2] 2025-04-02 ### New Features - `tiup-cluster` display will also show dashboard server URLs (#2472, @xhebox) - Add --mode=tiflash-disagg for `tiup-playground` (#2492, @breezewish) - `tiup-cluster` now accepts args config for alertmanager (#2516, @WalterWj) - `tiup-cluster` will fill node & role information in custom upgrade scripts (#2524, @xhebox) ### Fixes - Fix `tiup-playground` typo and display names for tiflash (#2454, @breezewish) - Fix # cannot be included in grafana password for `tiup-cluster` (#2468, @kaaaaaaang, @xhebox) - Fix binary name display of `tiup-playground` for pd-ms binaries (#2471, @xhebox) - In `tiup-playground`, MySQL 9.x don't need --comments (#2469, #dveeden) - Fix code typo (#2487, @rleungx) - Fix loong64 build (#2493, @xhebox) - Remove created directories after fio test in `tiup-cluster check` (#2511, @djshow832) - Remove some useless metrics for performance for `tiup-cluster` (#2523, @XuHuaiyu) ### Improvements - Add cdc server topology example for `tiup-cluster` (#2466, @wk989898) - Add tiproxy to `tiup-playground` DSN (#2474, @xhebox) - Polish the trust key warning for `tiup-cluster` and `tiup mirror` (#2475, @breezewish) - Update supported OS version for `tiup-cluster` check (#2476, @dveeden) - add warning for deletion of data dir for `tiup-playground` (#2481, @xhebox, @bb7133) - `tiup-cluster` will now display grafana, and also grafana patched (#2483, @WalterWj) - Improve error of missing tar for `tiup-cluster` (#2499, @dveeden) - Reword for `tiup uninstall` (#2488, @makocchi-git) - Code cleanup, dependency sysinfo update (#2500, @dveeden) - Fix message for net.core.somaxconn check in `tiup-cluster` (#2495, @dveeden) - Code cleanup, fix code misc spelling (#2504, @ottok) - Add `tmpdir` option for `tiup-cluster` (#2505, @dveeden) - Code cleanup, unify main deps with client component deps (#2517, @dveeden) ## [1.16.1] 2024-10-31 ### New Features - Deploy DM in 
`tiup-playground` (#2465, @GMHDBJD, @siddontang) ### Fixes - Fix several racings during download, do not upgrade nightly automatically in `tiup-cluster` (#2458, @xhebox) - Fix possible panic when `tiup-playground` failed to start (#2457, @xhebox) - Respect `component_versions` when `tiup-cluster` scales (#2451, @djshow832) - Code clean: replace 'math/rand' with 'crypto/rand' (#2455, @bb7133) - Fix tiup cannot update itself when tiup component exist (#2443, @nexustar) - Do not check HTTP port for TiFlash 7.1.0 or above (#2440, @Lloyd-Pottiger) - Also hide other password args (#2436, @xhebox) ### Improvements - Avoid unnecessary primary transfer for pdms mode in `tiup-cluster` (#2414, @HuSharp) - Add `--port-offset` for `tiup-playground` to start multiple instances without port conflicts (#2453, @breezewish) - Start with a name for pdms mode in `tiup-cluster` and `tiup-playground` (#2438, #2446, #2447, @HuSharp) - Remove prometheus systemd and interrupts collector (#2445, @xhebox) - Add example of setting the tiflash-proxy topology (#2444, @JaySon-Huang) - Improve cluster restart messaging for `tiup-cluster` (#2442, @zph) - Print version when filling `tiup-playground --xx.binpath` (#2334, @HuSharp) - Only display `--comments` when needed for `tiup-playground` (#2314, @dveeden) ## [1.16.0] 2024-06-27 ### New Features - Deploy PD as microservices in `tiup-cluster` (#2377, @rleungx) ### Fixes - Fix missing TiProxy session certs when `tiup-cluster` scale-out (#2432, @djshow832) ### Improvements - Remove tombstone nodes of pd when executing `tiup-cluster prune` (#2044, @nexustar) - Redact `-p` (password) arguments in command execution (#2425, @nexustar) - Avoid duplicated downloads when upgrade (#2423, @nexustar) ## [1.15.2] 2024-05-28 ### Improvements - Remove LGPL dependency ## [1.15.1] 2024-04-11 ### Fixes - Fix cannot choose cluster version in `tiup-playground` (#2397 #2402, @nexustar) - Fix wrong TiFlash numa config in `tiup-cluster` (#2401, @gengliqi) ### 
Improvements - Wait for TSO component ready in `tiup-playground` (#2349, @HuSharp) - Improve error handling of mirror renew in `tiup` (#2400, @dveeden) ## [1.15.0] 2024-04-01 ### New Features - Support no-sudo mode in `tiup-cluster` (#2350 #2373, @Yujie-Xie) - Add tidb-cse mode and remove tidb-disagg mode in `tiup-playground` (#2386, @breezewish) ### Fixes - Fix can not set runtime config in config file specific by --tiflash.config in `tiup-playground` (#2346 #2383, @Lloyd-Pottiger) - Fix not sanitize tiproxy component config when scale in in `tiup-playground` (#2365, @xhebox) - Fix timeout when checking component upgrade in `tiup` (#2379, @KanShiori) - Fix pdms grafana display in `tiup-playground` (#2382, @HuSharp) - Fix not update prometheus config when scale in in `tiup-cluster` (#2387, @Yujie-Xie) ### Improvements - Set the TiFlash logger level to DEBUG in `tiup-playground` (#2346, @Lloyd-Pottiger) - Add integration tests for tiproxy in `tiup-cluster` (#2371, @xhebox) - Set tiproxy addr without schema in `tiup-playground` (#2368, @xhebox) - Only pull nightly from cluster version in `tiup-playground` (#2364, @xhebox) - Skip tiproxy download & copy when upgrade in `tiup-cluster` (#2366, @xhebox) - Auto set session certs if there is a tiproxy in `tiup-cluster` (#2374, @xhebox) - Auto generate self-signed certs for TiProxy session migration in `tiup-playground` (#2372, @xhebox) - Remove resource manager in `tiup-playground` (#2381, @rleungx) - Add config advertise-addr to tiproxy spec in `tiup-cluster` and `tiup-playground` (#2392, @djshow832) ## [1.14.1] 2024-01-12 ### Fixes - Fix tiproxy will restart even if version is the same in `tiup-cluster` (#2358, @xhebox) - Fix that empty label is not allowed in Prometheus in `tiup-cluster` (#2349, @borissavelev) - Fix can not set runtime config in config file & set TiFlash logger level to debug in `tiup-playground` (#2346, @Lloyd-Pottiger) - Fix TLS configs for tiproxy in `tiup-cluster` (#2356, @xhebox) - Fix cannot clone 
without global version in `tiup` (#2359, @nexustar) - Fix tiproxy config and version in `tiup-cluster` (#2354, @xhebox) - TiProxy needs to override usercfg in `tiup-playground` (#2342, @xhebox) - Fix the start script for alertmanager when enabling numa in `tiup-cluster` and `tiup-dm` (#2337, @KanShiori) - Fix metric targets will be overrided in `tiup-playground` (#2324, @xhebox) ### Improvements - Change typo waitting to waiting in output messages for pdapi.go in `tiup-cluster` (#2330, @guangleibao) - Add check for rocky linux in `tiup-cluster` (#2333, @dveeden) - Fallback to default version when componen specific version is empty in `tiup-playground` (#2344, @xhebox) ## [1.14.0] 2023-11-17 ### New Features - Support use different component versions in `tiup-cluster` (#2010 #2264 #2306, @nexustar) - Add global listen_host config in `tiup-cluster` and `tiup-dm` (#2303, @nexustar) - Add global component_sources config in `tiup-cluster` and `tiup-dm` (#2320, @nexustar) - Support TiDB upgrade API to automatically pause DDL when upgrade in `tiup-cluster`(#2287 #2289, @nexustar) - Support TiProxy in `tiup-cluster` (#2271, @xhebox) - Support scheduling service in `tiup-playground` (#2273, @rleungx) - Support to show numa node when display in `tiup-cluster` and `tiup-dm` (#2295 #2312, @KanShiori) ### Fixes - Make sure to signal tiproxy to stop in `tiup-playground` (#2318, @dveeden) - Fix "clone" operation may be missing packages without throwing an error in `tiup` (#2309 #2311, @nexustar) - Not cache the output of ss -tln command when checking wether components are started/stopped in `tiup-cluster` and `tiup-dm` (#2298, @KanShiori) - Fix tidb-dashboard listen_host in `tiup-cluster` (#2304, @nexustar) - Fix tiproxy metrics addr in `tiup-playground` (#2299, @xhebox) ### Improvements - Upgrade go-sql-driver/mysql version to v1.7.1 (#2246, @srstack) - Use release version of TiProxy instead of nightly (#2305, @nexustar) - Use port to probe TiDB in `tiup-playground` (#2296, 
@KanShiori) - Add mcs name for pd log in `tiup-playground` (#2310, @HuSharp) - Not overwrite real error in retry function (#2290, @nexustar) - Fix alignment of output in `tiup-playground` (#2313, @dveeden) - ci improve (#2301 #2308 #2316, @nexustar @dveeden) ## [1.13.1] 2023-09-25 ### Fixes - Increase timeout when publish package in `tiup` (#2269, @nexustar) - Fix pd microservice component id in `tiup-playground` (#2272, @iosmanthus) - Fix grafana for multiple instances using same host in `tiup-cluster` and `tiup-dm` (#2277, @lastincisor) - Add cdn workaround (#2285, @nexustar) - Mirror: fix progress bar is not accurate (#2284, @nexustar) ### Improvement - Support ignore version check when upgrade in `tiup-cluster` and `tiup-dm` (#2282, @nexustar) ## [1.13.0] 2023-08-26 ### New Features - Support run components without tiup in `tiup` (#2265, @nexustar) - Support tiproxy in `tiup-playground` (#2202, @xhebox) - Show warning and continue if cannot delete data dir when destroy/scale-in instead of error and exit in `tiup-cluster` (#2256, @nexustar) - Support specifying package name of components in `tiup-cluster` (#2236, @nexustar) - playground: support microservices mode in `tiup-playground` (#2226, @rleungx) ### Fixes - Fix the issue meta backup is failed when symbolic links exist in meta dir in `tiup-cluster` (#2255, @@KanShiori) - Fix node filter on exec in `tiup-cluster` (#2251, @nexustar) ### Improvements - Move to Go 1.21 (#2249, @dveeden) - Upgrade TiCDC before TiKV and PD when cluster is equal or greater than v5.1.0 in `tiup-cluster` (#2253, @KanShiori) - progress: Add option to add error detail (#2203, @dveeden) ## [1.12.5] 2023-7-17 ### Fix - Fix cannot start tiflash above v7.1.0 in `tiup-cluster` (#2230, @zanmato1984) ## [1.12.4] 2023-7-13 ### Fix - Fix cannot show tiflash uptime in `tiup-cluster` (#2227, @nexustar) ### Improvement - Remove tcp_port for tiflash in `tiup-cluster` and `tiup-playground` (#2220, @zanmato1984) ## [1.12.3] 2023-6-14 ### Fixes - 
Fix cannot edit manage_host on an exist cluster in `tiup-cluster` (#2210, @nexustar) - Fix still use host instead of manage_host in `tiup-cluster` (#2206 #2207, @nexustar) ### Improvement - Check if the compnoent exists when uninstall in `tiup` (#2209, @srstack) ## [1.12.2] 2023-5-19 ### Notes - Chore: migrate to new prow reviewing (#2160, @wuhuizuo) - Publish nightly tiup packages based on master branch (#2192, @nexustar) ### Fixes - Fix cannot start alertmanager in `tiup-cluster` (#2200, @nexustar) - Fix TiFlash config not working in `tiup-playground` (#2190, @breezewish) - Fix --tiflash 0 not working in `tiup-playground` (#2189, @breezewish) - Fix port conflict when cluster version >= 5.4 and < 7.1 in `tiup-playground` (#2188, @Lloyd-Pottiger) - Fix cannot prune/scale-in tikv when use ipv6 in `tiup-cluster` (#2180, @nexustar) - Fix exec error when use ipv6 in `tiup-cluster` (#2193, @nexustar) ### Improvements - Output PD endpoints in tikv-slim mode in `tiup-playground` (#2196, @breezewish) - Add --mode=tidb-disagg in `tiup-playground` (#2194, @breezewish) - Add option to specify TiKV port in `tiup-playground` (#2183, @dveeden) - include '--comments' in mysql command line arguments in `tiup-playground` (#2187, @mjonss) - Code refine for TiFlash in `tiup-playground` (#2177, @breezewish) - Code refine for args in `tiup-playground` (#2178, @breezewish) ## [1.12.1] 2023-4-13 ### Fix - Fix ngm config in `tiup-cluster` (#2175, @Mystery-cyf) ### Improvements - Remove TiFlash http service in `tiup-cluster` and `tiup-playground` (#2165, Lloyd-Pottige) - Remove ":" on default backup filename in `tiup-cluster` (#2174, @nexustar) - Allows setting other storage config for TiFlash when `storage.main` is not defined in `tiup-cluster` (#2161, @breezewish) - Support run TiFlash via args in `tiup-playground` (#2162, @breezewish) - Tidy output in `tiup-playground` (#2163, @breezewish) ## [1.12.0] 2023-3-31 ### New Features - Support ipv6 in `tiup-cluster` and `tiup-dm` (#2068 #2069 
#2070 #2075 #2117, @nexustar) - Add support for displaying the grafana URLs in JSON output in `tiup-cluster` and `tiup-dm` (#2041, @onlyacat) - Allow download directory when use builtin scp (#2098, @nexustar) - Support set manage host in `tiup-cluster` and `tiup-dm` (#2147, @srstack) - User perm from parent dir in `tiup-cluster` (#2143, @nexustar) - Support display tiflash uptime in `tiup-cluster` (#2094, @srstack) - Add ticdc port prob config in `tiup-cluster` (#2124, @sdojjy) - Allow users to run custom shell commands during the cluster rolling upgrade in `tiup-cluster` (#2130, @darkelf21cn) - Check upgrade offline in `tiup-cluster` (#2116, @srstack) - Check required CPU flags for TiFlash >= v6.3.0 in `tiup-cluster` (#2054, @solotzg) - Support connect to external Pushgateway in `tiup-cluster` (#2137, @nexustar) - Allow setting the TiCDC port in `tiup-playground` (#2140, @dveeden) ### Improvements - Check the cdc capture can be found before drain the capture in `tiup-cluster` (#2059, @3AceShowHand) - Enable AutoTLS by default in `tiup-playground` (#2077, @dveeden) - Wait for leader to transfer back during rolling restart in `tiup-cluster` (#2051, @cosven) - Sync pdapi & typeutil with pd 6.3.0 in `tiup-cluster` (#2063, @AstroProfundis) - Optimize checks & tests (#2056, @AstroProfundis) - Mark openEuler as supported in `tiup-cluster` (#2145, @nexustar) - Merge tiflash config in tiup playground to reduce handwrite config in `tiup-playground` (#2086, @iosmanthus) - Add TIUP_VERBOSE in doc (#2111, @dveeden) - Delete storage.remote.cache.dir when prune in `tiup-cluster` (#2142, @hehechen) - Check process name before killing it in `tiup clean` (#2141, @dveeden) - Display disaggregated mode of TiFlash in `tiup-cluster` (#2133, @hehechen) - Remove mark_cache_size default value of tiflash.toml after v5.4.0 (#2138, @hongyunyan) - Delete !#$%&= from init password in `tiup-cluster` (#2136, @nexustar) ## [1.11.3] 2023-02-14 ### Improvement - Disable telemetry by default 
([#2119](https://github.com/pingcap/tiup/pull/2119), [@nexustar](https://github.com/nexustar)) ## [1.11.2] 2023-02-02 ### Fixes - base52: Check for invalid chars in `Decode()` ([#2103](https://github.com/pingcap/tiup/pull/2103), [@AstroProfundis](https://github.com/AstroProfundis)) ### Improvements - Update upgrade message in `tiup-cluster` ([#2110](https://github.com/pingcap/tiup/pull/2110), [@nexustar](https://github.com/nexustar)) - Comment out tidb-dashboard in template in `tiup-cluster` ([#2112](https://github.com/pingcap/tiup/pull/2112), [@nexustar](https://github.com/nexustar)) - Warn for cleanup failures due to missing datadir in `tiup clean` ([#2105](https://github.com/pingcap/tiup/pull/2105), [@dveeden](https://github.com/dveeden)) - Improve tiup timeout message in `tiup` ([#2109](https://github.com/pingcap/tiup/pull/2109), [@nexustar](https://github.com/nexustar)) ## [1.11.1] 2022-11-24 ## Note `tiup bench` has been **moved** to https://github.com/PingCAP-QE/tiup-bench and not release with tiup ### Fixes - Set `madvdontneed=1` for PD to shrink RSS after GC in `tiup-cluster` ([#2019](https://github.com/pingcap/tiup/pull/2019), [@lhy1024](https://github.com/lhy1024)) - Fix playground may panic when component fail to start in `tiup-playground` ([#2076](https://github.com/pingcap/tiup/pull/2076), [@nexustar](https://github.com/nexustar)) ### Improvements - Support configuring `scrape_interval` and `scrape_timeout` for prometheus in `tiup-cluster` ([#2071](https://github.com/pingcap/tiup/pull/2071), [@nexustar](https://github.com/nexustar)) - Remove the restriction that tikv num must be greater than 0 in `tiup-playground` ([#2073](https://github.com/pingcap/tiup/pull/2073), [@lilinghai](https://github.com/lilinghai)) - Mark kylin v10 as supported OS in `tiup-cluster` ([#2079](https://github.com/pingcap/tiup/pull/2079), [@nexustar](https://github.com/nexustar)) - Set `ETCDCTL_API=v3` to get the v3 API in `tiup-ctl` 
([#2081](https://github.com/pingcap/tiup/pull/2081), [@dveeden](https://github.com/dveeden)) - Avoid redirect when grafana `root_url` is set in `tiup-cluster` ([#2082](https://github.com/pingcap/tiup/pull/2082), [@nexustar](https://github.com/nexustar)) ## [1.11.0] 2022-09-23 ### New Features - Add support of new component `tikv-cdc` for `tiup-cluster` and `tiup-playground` ([#2000](https://github.com/pingcap/tiup/pull/2000), [#2022](https://github.com/pingcap/tiup/pull/2022), [@pingyu](https://github.com/pingyu)) - Add support of dedicated `tidb-dashboard` in `tiup-cluster` ([#2017](https://github.com/pingcap/tiup/pull/2017), [@nexustar](https://github.com/nexustar)) - Add support of TiCDC rolling upgrade for `tiup-cluster` ([#1996](https://github.com/pingcap/tiup/pull/1996), [#2005](https://github.com/pingcap/tiup/pull/2005), [#2036](https://github.com/pingcap/tiup/pull/2036), [@3AceShowHand](https://github.com/3AceShowHand)) - Add support to config TiCDC cluster-id for `tiup-cluster` ([#2042](https://github.com/pingcap/tiup/pull/2042), [@nexustar](https://github.com/nexustar)) - Add support to set CPUAffinity in `tiup-cluster` ([#2007](https://github.com/pingcap/tiup/pull/2007), [@YaozhengWang](https://github.com/YaozhengWang)) - Allow to display memory usage in `tiup-cluster` ([#1994](https://github.com/pingcap/tiup/pull/1994), [@nexustar](https://github.com/nexustar)) ### Fixes - Fix tmp file not deleted when upload package in `tiup-server` ([#2021](https://github.com/pingcap/tiup/pull/2021), [@nexustar](https://github.com/nexustar)) - Fix redundant log when start TiDB cluster with `tiup-playground` ([#2032](https://github.com/pingcap/tiup/pull/2032), [@nexustar](https://github.com/nexustar)) - Fix panic when fail to start component in `tiup-playground` ([#1933](https://github.com/pingcap/tiup/pull/1933), [@dveeden](https://github.com/dveeden)) - Fix scale-out cdc command in `tiup-playground` ([#1935](https://github.com/pingcap/tiup/pull/1935), 
[@lonng](https://github.com/lonng)) - Fix ineffectiveness of ticdc.config in `tiup-playground` ([#1978](https://github.com/pingcap/tiup/pull/1978), [@pingyu](https://github.com/pingyu)) - Fix timezone check and remove duplicate cleanTasks in `tiup-cluster` ([#2045](https://github.com/pingcap/tiup/pull/2045), [@nexustar](https://github.com/nexustar)) ### Improvements - Use test-cluster as dashboard name in `tiup-playground` ([#1920](https://github.com/pingcap/tiup/pull/1920), [@breezewish](https://github.com/breezewish)) - Add pd.port argument in `tiup-playground` ([#1931](https://github.com/pingcap/tiup/pull/1931), [@pingyu](https://github.com/pingyu)) - Allow --tag argument on any locate in `tiup-playground` ([#1998](https://github.com/pingcap/tiup/pull/1998), [@pingyu](https://github.com/pingyu)) ## [1.10.3] 2022-08-11 ### Mirror update - Add new version for node_exporter (https://github.com/prometheus/node_exporter/releases/tag/v1.3.1) and blackbox_exporter (https://github.com/prometheus/blackbox_exporter/releases/tag/v0.21.1) in tiup repository. All the new tidb clusters or instances deployed by tiup cluster will use the new version by default. 
### Fixes - Fix cannot clean related tidb topology after scale-in in `tiup-cluster` ([#2011](https://github.com/pingcap/tiup/pull/2011), [@nexustar](https://github.com/nexustar)) - Fix fail to push if server name has "-" in `tiup-cluster` ([#2008](https://github.com/pingcap/tiup/pull/2008), [@nexustar](https://github.com/nexustar)) - Fix unable to configure tiflash LearnerConfig in `tiup-cluster` ([#1991](https://github.com/pingcap/tiup/pull/1991), [@srstack](https://github.com/srstack)) ### Improvements - Improve the THP check rule in `tiup-cluster` ([#2014](https://github.com/pingcap/tiup/pull/2014), [@nexustar](https://github.com/nexustar)) - Add an example in -h for `tiup mirror clone` for multiple versions ([#2009](https://github.com/pingcap/tiup/pull/2009), [@nexustar](https://github.com/nexustar)) ## [1.10.2] 2022-06-16 ### Fixes - Fix cannot get drainer status from pd in `tiup-cluster` ([#1922](https://github.com/pingcap/tiup/pull/1922), [@srstack](https://github.com/srstack)) - Fix error when check time zone in `tiup-cluster` ([#1925](https://github.com/pingcap/tiup/pull/1925), [@nexustar](https://github.com/nexustar)) - Fix wrong parameter value of --peer-urls in `tiup-dm` ([#1926](https://github.com/pingcap/tiup/pull/1926), [@nexustar](https://github.com/nexustar)) ## [1.10.1] 2022-06-09 ### Fix - Fix SSH login error when identity file is specified for non-root user in `tiup-cluster` ([#1914](https://github.com/pingcap/tiup/pull/1914), [@srstack](https://github.com/srstack)) ## [1.10.0] 2022-06-08 ### New Features - Add support of backup and restore the cluster metadata for `tiup-cluster` and `tiup-dm` ([#1801](https://github.com/pingcap/tiup/pull/1801), [@nexustar](https://github.com/nexustar)) - Add `history` command for `tiup` to display component execution records ([#1808](https://github.com/pingcap/tiup/pull/1808), [@srstack](https://github.com/srstack)) - Add support of trying to disable swap when `check --apply` in `tiup-cluster` 
([#1803](https://github.com/pingcap/tiup/pull/1803), [@AstroProfundis](https://github.com/AstroProfundis)) - Add Grafana URL in `display` output of `tiup-cluster` ([#1819](https://github.com/pingcap/tiup/pull/1819), [@Smityz](https://github.com/Smityz)) - Add a `latest` alias for component versions when cloning repo with `tiup mirror clone` command ([#1835](https://github.com/pingcap/tiup/pull/1835), [@srstack](https://github.com/srstack)) - Add Kylin Linux 10+ as supported in `check` result of `tiup-cluster` ([#1886](https://github.com/pingcap/tiup/pull/1886), [@srstack](https://github.com/srstack)) - Add support of completion of cluster name with Tab button for `tiup-cluster` ([#1891](https://github.com/pingcap/tiup/pull/1891), [@nexustar](https://github.com/nexustar)) - Add support of checking timezone consistency among servers in `check` command of `tiup-cluster` ([#1890](https://github.com/pingcap/tiup/pull/1890), [@nexustar](https://github.com/nexustar)) - Add support of deploying on RHEL 8 in `tiup-cluster` ([#1896](https://github.com/pingcap/tiup/pull/1896), [@nexustar](https://github.com/nexustar)) - Add support of specifying custom key directory when rotating `root.json` in `tiup mirror` command ([#1848](https://github.com/pingcap/tiup/pull/1848), [@AstroProfundis](https://github.com/AstroProfundis)) ### Fixes - Fix typo in error message of `tiup-bench` ([#1824](https://github.com/pingcap/tiup/pull/1824), [@Mini256](https://github.com/Mini256)) - Fix duplicated component path printed in `tiup` ([#1832](https://github.com/pingcap/tiup/pull/1832), [@nexustar](https://github.com/nexustar)) - Fix outdated URL in topology example for `tiup-cluster` ([#1840](https://github.com/pingcap/tiup/pull/1840), [@srstack](https://github.com/srstack)) - Fix DM startup scripts to bind `0.0.0.0` instead of host IP ([#1845](https://github.com/pingcap/tiup/pull/1845), [@nexustar](https://github.com/nexustar)) - Fix incorrect blackbox_exporter, node_exporter and Grafana status 
monitor for TLS enabled clusters ([#1853](https://github.com/pingcap/tiup/pull/1853), [@srstack](https://github.com/srstack)) - Fix priority of tag argument for `tiup-playground` ([#1869](https://github.com/pingcap/tiup/pull/1869), [@nexustar](https://github.com/nexustar)) - Fix `TIUP_HOME` not loaded correctly on initializing metadata for some components ([#1885](https://github.com/pingcap/tiup/pull/1885), [@srstack](https://github.com/srstack)) - Fix concurrent error in `display` command of `tiup-cluster` ([#1895](https://github.com/pingcap/tiup/pull/1895), [@srstack](https://github.com/srstack)) - Fix incorrect workload loading in `tiup-bench` ([#1827](https://github.com/pingcap/tiup/pull/1827), [@Smityz](https://github.com/Smityz)) - Fix OS type detection for hybrid platform deployment in `tiup-cluster` ([#1753](https://github.com/pingcap/tiup/pull/1753), [@srstack](https://github.com/srstack)) ### Improvements - Add notes about default workload values in help message of `tiup-bench` ([#1807](https://github.com/pingcap/tiup/pull/1807), [@Smityz](https://github.com/Smityz)) - Refactor `-h/--help` handling to avoid conflicts with component arguments ([#1831](https://github.com/pingcap/tiup/pull/1831), [@nexustar](https://github.com/nexustar)) - Refactor version specific handlings of TiDB cluster to a dedicated Go package ([#1873](https://github.com/pingcap/tiup/pull/1873), [@nexustar](https://github.com/nexustar)) - Improve integrate tests for `tiup-cluster` ([#1882](https://github.com/pingcap/tiup/pull/1882), [@nexustar](https://github.com/nexustar)) - Adjust help information of `edit-cluster` command for `tiup-cluster` and `tiup-dm` ([#1900](https://github.com/pingcap/tiup/pull/1900), [@nexustar](https://github.com/nexustar)) - Update configuration example of monitoring components ([#1818](https://github.com/pingcap/tiup/pull/1818), [@glkappe](https://github.com/glkappe); [#1843](https://github.com/pingcap/tiup/pull/1843), 
[@nexustar](https://github.com/nexustar)) - Improve cluster shutting down process in `playground` ([#1893](https://github.com/pingcap/tiup/pull/1893), [@nexustar](https://github.com/nexustar)) ## [1.9.6] 2022-05-20 ### Fix - Fix incorrect output of `display` in certain circumstances for `tiup-cluster` ([#1889](https://github.com/pingcap/tiup/pull/1889), [@srstack](https://github.com/srstack)) ## [1.9.5] 2022-05-10 ### Fixes - Fix `prune` incorrectly destroy pump/drainer node before they become `Tombstone` in `tiup-cluster` ([#1851](https://github.com/pingcap/tiup/pull/1851), [@srstack](https://github.com/srstack)) - Report error when multiple pump nodes with the same `ip:port` found in `tiup-cluster` ([#1856](https://github.com/pingcap/tiup/pull/1856), [@srstack](https://github.com/srstack)) - Get node status of pump/drainer from PD in `tiup-cluster` ([#1862](https://github.com/pingcap/tiup/pull/1862), [@srstack](https://github.com/srstack)) ### Improvements - Check node status concurrently and support custom timeout for `display` in `tiup-cluster` ([#1867](https://github.com/pingcap/tiup/pull/1867), [@srstack](https://github.com/srstack)) - Support `tidb-lightning` in `tiup-ctl` ([#1863](https://github.com/pingcap/tiup/pull/1863), [@nexustar](https://github.com/nexustar)) ## [1.9.4] 2022-04-12 ### Fixes - Fix copy error when file is read only in `tiup-playground` ([#1816](https://github.com/pingcap/tiup/pull/1816), [@breeswish](https://github.com/breeswish)) - Fix `data-dir` not properly handled for TiCDC v6.0.0 in `tiup-cluster` ([#1838](https://github.com/pingcap/tiup/pull/1838), [@overvenus](https://github.com/overvenus)) ## [1.9.3] 2022-03-24 ### Fixes - Fix error running `exec` subcommand of `tiup-cluster` when hostname contains '-' ([#1794](https://github.com/pingcap/tiup/pull/1794), [@nexustar](https://github.com/nexustar)) - Fix port conflict check for TiFlash instances in `tiup-cluster` ([#1805](https://github.com/pingcap/tiup/pull/1805), 
[@AstroProfundis](https://github.com/AstroProfundis)) - Fix next-generation monitor (`ng-monitor`) not available in Prometheus ([#1806](https://github.com/pingcap/tiup/pull/1806), [@nexustar](https://github.com/nexustar)) - Fix node_exporter metrics not collected if the host has only Prometheus deployed ([#1806](https://github.com/pingcap/tiup/pull/1806), [@nexustar](https://github.com/nexustar)) - Fix `--host 0.0.0.0` not working in `tiup-playground` ([#1811](https://github.com/pingcap/tiup/pull/1811), [@nexustar](https://github.com/nexustar)) ### Improvements - Support cleanup audit log files for `tiup-cluster` and `tiup-dm` ([#1780](https://github.com/pingcap/tiup/pull/1780), [@srstack](https://github.com/srstack)) - Add anonymous login example to Grafana configuration templates ([#1785](https://github.com/pingcap/tiup/pull/1785), [@sunzhaoyang](https://github.com/sunzhaoyang)) ## [1.9.2] 2022-03-10 ### Fixes - Fix next-generation monitor (`ng-monitor`) is not started by default for nightly versions in `tiup-cluster` ([#1760](https://github.com/pingcap/tiup/pull/1760), [@nexustar](https://github.com/nexustar)) - Fix the `--ignore-config-check` argument not working during deploy process in `tiup-cluster` ([#1774](https://github.com/pingcap/tiup/pull/1774), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix incorrect `initial-commit-ts` config for drainer in `tiup-cluster` ([#1776](https://github.com/pingcap/tiup/pull/1776), [@nexustar](https://github.com/nexustar)) - Fix symbolic link handling when decompressing packages ([#1784](https://github.com/pingcap/tiup/pull/1784), [@nexustar](https://github.com/nexustar)) ### Improvements - Check for inactive Prometheus service before `reload` in `tiup-cluster` ([#1775](https://github.com/pingcap/tiup/pull/1775), [@nexustar](https://github.com/nexustar)) - Mark Oracle Linux as supported OS in `check` result of `tiup-cluster` ([#1786](https://github.com/pingcap/tiup/pull/1786), 
[@srstack](https://github.com/srstack)) ## [1.9.1] 2022-02-24 ### Fixes - Fix panic running TPCC with `tiup-bench` ([#1755](https://github.com/pingcap/tiup/pull/1755), [@nexustar](https://github.com/nexustar)) - Fix blackbox_exporter and node_exporter not restarted during upgrade in `tiup-cluster` and `tiup-dm` ([#1758](https://github.com/pingcap/tiup/pull/1758), [@srstack](https://github.com/srstack)) - Fix messed `stdout` and `stderr` handling for SSH commands in `tiup-cluster` and `tiup-dm` ([#1763](https://github.com/pingcap/tiup/pull/1763), [@tongtongyin](https://github.com/tongtongyin)) - Fix Grafana datasource config handling in `tiup-cluster` and `tiup-dm` ([#1768](https://github.com/pingcap/tiup/pull/1768), [@srstack](https://github.com/srstack)) ## [1.9.0] 2022-02-10 ### New Features - Enable next-generation monitor (`ng-monitor`) by default for TiDB versions equal or later than `v5.4.0` in `tiup-cluster` ([#1699](https://github.com/pingcap/tiup/pull/1699) [#1743](https://github.com/pingcap/tiup/pull/1743), [@nexustar](https://github.com/nexustar)) - Add support of enabling and disabling TLS encryption for deployed TiDB cluster in `tiup-cluster` ([#1657](https://github.com/pingcap/tiup/pull/1657), [@srstack](https://github.com/srstack)) - Add support of deploying TLS enabled DM clusters in `tiup-dm` ([#1745](https://github.com/pingcap/tiup/pull/1745), [@nexustar](https://github.com/nexustar)) - Add support of changing owner of a component in `tiup mirror` and `tiup-server` ([#1676](https://github.com/pingcap/tiup/pull/1676), [@AstroProfundis](https://github.com/AstroProfundis)) - Add support of specifying IP address to bind for AlertManager in `tiup-cluster` ([#1665](https://github.com/pingcap/tiup/pull/1665) [#1669](https://github.com/pingcap/tiup/pull/1669), [@srstack](https://github.com/srstack)) - Add support of initialing random root password for TiDB in `tiup-cluster` ([#1700](https://github.com/pingcap/tiup/pull/1700), 
[@AstroProfundis](https://github.com/AstroProfundis)) - Add support of `check` before scaling out a cluster in `tiup-cluster` ([#1659](https://github.com/pingcap/tiup/pull/1659), [@srstack](https://github.com/srstack)) - Add support of customizing Grafana configurations in `server_configs` section in `tiup-cluster` and `tiup-dm` ([#1703](https://github.com/pingcap/tiup/pull/1703), [@nexustar](https://github.com/nexustar)) - Add support of Chrony as valid NTP daemon for `check` in `tiup-cluster` ([#1714](https://github.com/pingcap/tiup/pull/1714), [@srstack](https://github.com/srstack)) - Add Amazon Linux 2 as supported OS for `check` in `tiup-cluster` ([#1740](https://github.com/pingcap/tiup/pull/1740), [@dveeden](https://github.com/dveeden)) - Add significant warning destroying a cluster in `tiup-cluster` and `tiup-dm` ([#1723](https://github.com/pingcap/tiup/pull/1723), [@AstroProfundis](https://github.com/AstroProfundis)) ### Fixes - Fix DM hosts not added to node_exporter list of Prometheus configuration in `tiup-dm` ([#1654](https://github.com/pingcap/tiup/pull/1654), [@AstroProfundis](https://github.com/AstroProfundis)) - Adjust command argument of `tiup` to workaround conflict with some components ([#1698](https://github.com/pingcap/tiup/pull/1698), [@nexustar](https://github.com/nexustar)) - Fix global configs not correctly set for new instances during scaling out in `tiup-cluster` ([#1701](https://github.com/pingcap/tiup/pull/1701), [@srstack](https://github.com/srstack)) - Fix incorrect `initial_commit_ts` set in start up script of Drainer in `tiup-cluster` ([#1706](https://github.com/pingcap/tiup/pull/1706), [@nexustar](https://github.com/nexustar)) - Fix JSON output for `check` results in `tiup-cluster` ([#1720](https://github.com/pingcap/tiup/pull/1720), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix incorrect instance status for `display` in `tiup-cluster` ([#1742](https://github.com/pingcap/tiup/pull/1742), 
[@nexustar](https://github.com/nexustar)) - Fix malformed commands in local executor in `tiup-cluster` ([#1734](https://github.com/pingcap/tiup/pull/1734), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix incorrect exit code for `tiup` ([#1738](https://github.com/pingcap/tiup/issues/1738), [@nexustar](https://github.com/nexustar)) - Remove duplicate `check` results in `tiup-cluster` ([#1737](https://github.com/pingcap/tiup/pull/1737), [@srstack](https://github.com/srstack)) - Fix version check of TiFlash nightly builds for TLS enabled clusters in `tiup-cluster` ([#1735](https://github.com/pingcap/tiup/pull/1735), [@srstack](https://github.com/srstack)) ### Improvements - Adjust configuration template for TiFlash to support new versions in `tiup-cluster` ([#1673](https://github.com/pingcap/tiup/pull/1673), [@hehechen](https://github.com/hehechen)) - Adjust configuration sample for DM in `tiup-dm` ([#1692](https://github.com/pingcap/tiup/pull/1692), [@lance6716](https://github.com/lance6716)) - Reder cluster name for custom Prometheus alert rules in `tiup-cluster` ([#1674](https://github.com/pingcap/tiup/pull/1674), [@srstack](https://github.com/srstack)) - Improve shell auto-completion to support cli of components ([#1678](https://github.com/pingcap/tiup/pull/1687), [@nexustar](https://github.com/nexustar)) - Add checks for `tiup` installed with 3rd party package manager when running `tiup update --self` ([#1693](https://github.com/pingcap/tiup/issues/1693), [@srstack](https://github.com/srstack)) - Check for component updates before actually run it ([#1718](https://github.com/pingcap/tiup/pull/1718), [@nexustar](https://github.com/nexustar)) - Use latest nightly build for each component in `tiup-playground` ([#1727](https://github.com/pingcap/tiup/pull/1727), [@nexustar](https://github.com/nexustar)) ## [1.8.2] 2022-01-11 ### Fixes - Fix global configuration not inherited correctly in `scale-out` command of `tiup-cluster` 
([#1701](https://github.com/pingcap/tiup/pull/1701), [@srstack](https://github.com/srstack)) - Fix errors starting `tiup-playground` in some circumstances ([#1712](https://github.com/pingcap/tiup/pull/1712) [#1715](https://github.com/pingcap/tiup/pull/1715), [@nexustar](https://github.com/nexustar)) - Fix error comparing nightly versions in `tiup-cluster` ([#1702](https://github.com/pingcap/tiup/pull/1702), [@srstack](https://github.com/srstack)) ### Improvements - Update out-of-date documents of `tiup mirror` ([#1705](https://github.com/pingcap/tiup/pull/1705), [@dveeden](https://github.com/dveeden)) ## [1.8.1] 2021-12-20 ### Fixes - Fix port conflict not checked for TiDB clusters imported from `tidb-ansible` on `scale-out` in `tiup-cluster` ([#1656](https://github.com/pingcap/tiup/pull/1656), [@srstack](https://github.com/srstack)) - Fix SSH commands stale in some circumstances ([#1664](https://github.com/pingcap/tiup/pull/1664), [@nexustar](https://github.com/nexustar)) - Fix default value of `initial-commit-ts` for drainer in `tiup-cluster` ([#1678](https://github.com/pingcap/tiup/pull/1678), [@nexustar](https://github.com/nexustar)) ### Improvements - Display check result of CPU frequency governor even when it's not available ([#1663](https://github.com/pingcap/tiup/pull/1663), [@srstack](https://github.com/srstack)) ## [1.8.0] 2021-12-06 ### New Features - Add `data-dir` support for TiCDC in `tiup-playground` ([#1631](https://github.com/pingcap/tiup/pull/1631), [@nexustar](https://github.com/nexustar)) - Add support of using custom files as input of `edit-config`, and support dumping the current full config to a file with `show-config` command in `tiup-cluster` ([#1637](https://github.com/pingcap/tiup/pull/1637), [@haiboumich](https://github.com/haiboumich)) - Add support of next-generation monitor (`ng-monitor`) in `tiup-playground` ([#1648](https://github.com/pingcap/tiup/pull/1648), [@nexustar](https://github.com/nexustar)) - Add support of inserting 
custom `scrape_configs` to Prometheus configs in `tiup-cluster` ([#1641](https://github.com/pingcap/tiup/pull/1641), [@nexustar](https://github.com/nexustar)) - [experimental] Support 2-staged scaling out for `tiup-cluster` ([#1638](https://github.com/pingcap/tiup/pull/1638) [#1642](https://github.com/pingcap/tiup/pull/1642), [@srstack](https://github.com/srstack)) - Scaling out of a TiDB cluster can be divided with `--stage1` and `--stage2` arguments, the stage 1 deploys files and configs but not starting the new instances, and the stage 2 actually starts the new instances and reload necessary configs - This could be useful if you want to modify config of the new instances or use a custom binary with `patch` **before** the first start of the new instances - [experimental] Implement plain text output and support custom output writer for logs ([#1646](https://github.com/pingcap/tiup/pull/1646), [@AstroProfundis](https://github.com/AstroProfundis)) ### Fixes - Fix incorrect progress bar displaying in some tasks ([#1624](https://github.com/pingcap/tiup/pull/1624), [@nexustar](https://github.com/nexustar)) - Fix incorrect argument flags in `tiup-playground` ([#1635](https://github.com/pingcap/tiup/pull/1635), [@srstack](https://github.com/srstack)) - Fix files of monitoring agents and TiDB audit log not cleaned with `clean` command of `tiup-cluster` ([#1643](https://github.com/pingcap/tiup/pull/1643) [#1644](https://github.com/pingcap/tiup/pull/1644), [@srstack](https://github.com/srstack)) - Fix confirmation prompt in `scale-out` can not be skipped with `--yes` argument in `tiup-cluster` ([#1645](https://github.com/pingcap/tiup/pull/1645), [@srstack](https://github.com/srstack)) - Fix directory conflict error in some circumstances even when node is marked as `ignore_exporter` ([#1649](https://github.com/pingcap/tiup/pull/1649), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix DM nodes not added to node_exporter target list in Prometheus config in `tiup-dm` 
([#1654](https://github.com/pingcap/tiup/pull/1654), [@AstroProfundis](https://github.com/AstroProfundis)) ### Improvements - Add significant warning when `--force` argument is set for `scale-in` command in `tiup-cluster` ([#1629](https://github.com/pingcap/tiup/pull/1629), [@AstroProfundis](https://github.com/AstroProfundis)) - Add environment variables to skip topology sanity check in `scale-in` command in `tiup-cluster` ([#1627](https://github.com/pingcap/tiup/pull/1627), [@AstroProfundis](https://github.com/AstroProfundis)) - Update examples to use `--without-monitor` instead of `--monitor` for `tiup-playground` ([#1639](https://github.com/pingcap/tiup/pull/1639), [@dveeden](https://github.com/dveeden)) ## [1.7.0] 2021-11-15 ### New Features - Support deploying and managing TLS enabled TiDB cluster with TiFlash nodes ([#1594](https://github.com/pingcap/tiup/pull/1594), [@nexustar](https://github.com/nexustar)) - Support rendering template for local deployment with vairables in `tiup-cluster` and `tiup-dm` ([#1596](https://github.com/pingcap/tiup/pull/1596), [@makocchi-git](https://github.com/makocchi-git)) - [experimental] Support optionally enable next-generation monitor (`ng-monitor`) for latest TiDB releases ([#1601](https://github.com/pingcap/tiup/pull/1601), [@nexustar](https://github.com/nexustar)) - [experimental] Support JSON output format for `tiup-cluster` and `tiup-dm` ([#1617](https://github.com/pingcap/tiup/pull/1617), [@AstroProfundis](https://github.com/AstroProfundis)) ### Fixes - Remove warning about tag argument for `tiup-playground` ([#1606](https://github.com/pingcap/tiup/pull/1606), [@nexustar](https://github.com/nexustar)) - Set `--external-url` for AlertManager in `tiup-cluster` ([#1608](https://github.com/pingcap/tiup/pull/1608), [@reAsOn2010](https://github.com/reAsOn2010)) - Fix auto detecting of system arch fail in certain circumstances ([#1610](https://github.com/pingcap/tiup/pull/1610), 
[@AstroProfundis](https://github.com/AstroProfundis)) ### Improvements - Support getting cluster ID from PD in `pdapi` package ([#1573](https://github.com/pingcap/tiup/pull/1573) [#1574](https://github.com/pingcap/tiup/pull/1574), [@nexustar](https://github.com/nexustar); [#1580](https://github.com/pingcap/tiup/pull/1580), [@AstroProfundis](https://github.com/AstroProfundis)) - Accurately get status of TiFlash nodes during operations ([#1600](https://github.com/pingcap/tiup/pull/1600), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.6.1] 2021-10-21 ### Fixes - Fix `tiup-bench` reporting wrong latency for TPCC workloads ([#1577](https://github.com/pingcap/tiup/pull/1577), [@lobshunter](https://github.com/lobshunter)) - Fix test cases for `tiup-bench` and `tiup-client` ([#1579](https://github.com/pingcap/tiup/pull/1579), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix fetching component manifest error on certain circumstances ([#1581](https://github.com/pingcap/tiup/pull/1581), [@nexustar](https://github.com/nexustar)) ## [1.6.0] 2021-10-09 ### New Features - Add support of using `ssh-agent` auth socket in `tiup-cluster` ([#1416](https://github.com/pingcap/tiup/pull/1416), [@9547](https://github.com/9547)) - Add parallel task concurrency control in `tiup-cluster` and `tiup-dm` with `-c/--concurrency` argument ([#1420](https://github.com/pingcap/tiup/pull/1420), [@AstroProfundis](https://github.com/AstroProfundis)) - The default value of max number of parallel tasks allowed is 5, this feature could help users managing very large clusters to avoid connection errors on operations. 
- Add the ability to detect CPU arch for deployment servers automatically in `tiup-cluster` and `tiup-dm` if not set by user ([#1423](https://github.com/pingcap/tiup/pull/1423), [@9547](https://github.com/9547)) - Add `renew` subcommand for `tiup mirror` to extend the expiration date of component manifest ([#1479](https://github.com/pingcap/tiup/pull/1479), [@AstroProfundis](https://github.com/AstroProfundis)) - Add the ability to ignore monitor agents for specific instances in `tiup-cluster` ([#1492](https://github.com/pingcap/tiup/pull/1492), [@AstroProfundis](https://github.com/AstroProfundis)) - Add `--force` argument for `prune` subcommand in `tiup-cluster` ([#1552](https://github.com/pingcap/tiup/pull/1552), [@AstroProfundis](https://github.com/AstroProfundis)) - Add more configuration fields for Grafana in `tiup-cluster` and `tiup-dm` ([#1566](https://github.com/pingcap/tiup/pull/1566), [@haiboumich](https://github.com/haiboumich)) - [Experimental] Add support of SSH connections via proxy in `tiup-cluster` ([#1438](https://github.com/pingcap/tiup/pull/1438), [@9547](https://github.com/9547)) - Deprecate the `--monitor` argument and introduce a new `--without-monitor` argument to disable monitoring components in `tiup-playground` ([#1512](https://github.com/pingcap/tiup/pull/1512), [@LittleFall](https://github.com/LittleFall)) - Deprecate the `TIUP_WORK_DIR` environment as it's not actually been used, and make it possible for `tiup-playground` to run without `tiup` ([#1553](https://github.com/pingcap/tiup/pull/1553) [#1556](https://github.com/pingcap/tiup/pull/1556) [#1558](https://github.com/pingcap/tiup/pull/1558), [@nexustar](https://github.com/nexustar)) ### Fixes - Fix `blackbox_exporter` configs for TLS enabled clusters in `tiup-cluster` ([#1443](https://github.com/pingcap/tiup/pull/1443), [@9547](https://github.com/9547)) - Only try to apply THP fix if it's available on the deployment server in `tiup-cluster` 
([#1458](https://github.com/pingcap/tiup/pull/1458), [@9547](https://github.com/9547)) - Fix sudo errors in `tiup-cluster` when devtoolset is enabled on deployment server ([#1516](https://github.com/pingcap/tiup/pull/1516), [@nexustar](https://github.com/nexustar)) - Fix test cases for `tiup-dm` ([#1540](https://github.com/pingcap/tiup/pull/1540), [@nexustar](https://github.com/nexustar)) - Fix downloading of uneeded component packages when `--binpath` is specifiedin `tiup-playground` ([#1495](https://github.com/pingcap/tiup/pull/1495), [@AstroProfundis](https://github.com/AstroProfundis); [#1545](https://github.com/pingcap/tiup/pull/1545), [@nexustar](https://github.com/nexustar)) - Fix panic when `tiup-bench` fails to connect to the database ([#1557](https://github.com/pingcap/tiup/pull/1557), [@nexustar](https://github.com/nexustar)) - Fix `numa_node` configs are not rendered into PD startup script in `tiup-cluster` ([#1565](https://github.com/pingcap/tiup/pull/1565), [@onlyacat](https://github.com/onlyacat)) - Correctly handle `--` in command line arguments passed to `tiup` ([#1569](https://github.com/pingcap/tiup/pull/1569), [@dveeden](https://github.com/dveeden)) ### Improvements - Reduce network usage on various operations and speed up the process - Update component manifests simultaneously in `tiup list` ([#1532](https://github.com/pingcap/tiup/pull/1532), [@nexustar](https://github.com/nexustar)) - Not requesting manifest before actually using the component ([#1539](https://github.com/pingcap/tiup/pull/1539), [@nexustar](https://github.com/nexustar)) - Update `root.json` only when necessary ([#1554](https://github.com/pingcap/tiup/pull/1554), [@nexustar](https://github.com/nexustar)) - Use the value of `--wait-timeout` argument as timeout of SSH command operations with the `builtin` executor ([#1445](https://github.com/pingcap/tiup/pull/1445), [@AstroProfundis](https://github.com/AstroProfundis)) - Refuse to `clone` a local mirror to the same location it 
is stored ([#1464](https://github.com/pingcap/tiup/pull/1464), [@dveeden](https://github.com/dveeden)) - Set terminal title to show session tag in `tiup-playground` ([#1506](https://github.com/pingcap/tiup/pull/1506), [@dveeden](https://github.com/dveeden)) - Show TiDB port when scale out in `tiup-playground` ([#1520](https://github.com/pingcap/tiup/pull/1520), [@nexustar](https://github.com/nexustar)) - Cleanup files if component fails to install ([#1562](https://github.com/pingcap/tiup/pull/1562), [@nexustar](https://github.com/nexustar)) - Update docs and examples ([#1484](https://github.com/pingcap/tiup/pull/1484), [@ichn-hu](https://github.com/ichn-hu); [#1502](https://github.com/pingcap/tiup/pull/1502), [@AstroProfundis](https://github.com/AstroProfundis)) - Use auto completion from `cobra` itself ([#1544](https://github.com/pingcap/tiup/pull/1544), [@AstroProfundis](https://github.com/AstroProfundis); [#1549](https://github.com/pingcap/tiup/pull/1549), [@nexustar](https://github.com/nexustar)) ## [1.5.6] 2021-09-01 ### Fixes - Fix OS version check rules for `tiup-cluster check` ([#1535](https://github.com/pingcap/tiup/pull/1535), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix component upgrade order for `tiup-cluster` to make sure TiCDC nodes work correctly ([#1542](https://github.com/pingcap/tiup/pull/1542), [@overvenus](https://github.com/overvenus)) ### Improvements - Adjust warning message of `tiup-cluster restart` to make users clear that the cluster will be unavailable during the process ([#1523](https://github.com/pingcap/tiup/pull/1523), [@glkappe](https://github.com/glkappe)) - Reverse the order of audit log listing to show latest records at the buttom ([#1538](https://github.com/pingcap/tiup/pull/1538), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.5.5] 2021-08-19 ### Fixes - Fix error when reloading a stopped cluster with `--skip-restart` argument ([#1513](https://github.com/pingcap/tiup/pull/1513), 
[@AstroProfundis](https://github.com/AstroProfundis)) - Use absolute path for `sudo` command, to workaround errors on systems where `devtoolset` is enabled ([#1516](https://github.com/pingcap/tiup/pull/1516), [@nexustar](https://github.com/nexustar)) - Fix custom TiDB port not correctly set in playground ([#1511](https://github.com/pingcap/tiup/pull/1511), [@hecomlilong](https://github.com/hecomlilong)) ### Improvements - Adjust suggested argument order in playground examples ([#1522](https://github.com/pingcap/tiup/pull/1522), [@glkappe](https://github.com/glkappe)) ## [1.5.4] 2021-08-05 ### Fixes - Allow editing of `lerner_config` field in TiFlash spec ([#1494](https://github.com/pingcap/tiup/pull/1494), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix incorrect timeout for telemetry requests ([#1500](https://github.com/pingcap/tiup/pull/1500), [@AstroProfundis](https://github.com/AstroProfundis)) - Ignore `data_dir` of monitor agents when checking for directory overlaps ([#1510](https://github.com/pingcap/tiup/pull/1510), [@AstroProfundis](https://github.com/AstroProfundis)) ### Improvements - Distinguish cookie names of multiple grafana instances on the same host ([#1491](https://github.com/pingcap/tiup/pull/1491), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.5.3] 2021-07-15 ### Fixes - Fix incorrect alert rules for TiDB version 3.x ([#1463](https://github.com/pingcap/tiup/pull/1463), [@9547](https://github.com/9547)) - Fix TiKV config check to correctly handle the `data_dir` value ([#1471](https://github.com/pingcap/tiup/pull/1471), [@tabokie](https://github.com/tabokie)) ### Improvements - Update dependencies and adjust error message of `ctl` ([#1459](https://github.com/pingcap/tiup/pull/1459), [@AstroProfundis](https://github.com/AstroProfundis)) - Use `$SHELL` environment variable for completion ([#1455](https://github.com/pingcap/tiup/pull/1455), [@dveeden](https://github.com/dveeden)) - Allow listing components from local cached 
manifests without network access ([#1466](https://github.com/pingcap/tiup/pull/1466), [@c4pt0r](https://github.com/c4pt0r)) - Adjust error message of SELinux check failure ([#1476](https://github.com/pingcap/tiup/pull/1476), [@AstroProfundis](https://github.com/AstroProfundis)) - Adjust warning message when `scale-in` with `--force` argument to make it more clear of potential risks ([#1477](https://github.com/pingcap/tiup/pull/1477), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.5.2] 2021-06-24 ### Fixes - Fix native SSH not working with custom SSH port ([#1424](https://github.com/pingcap/tiup/pull/1424), [@9547](https://github.com/9547)) - Fix dashboard address displaying issue for `tikv-slim` clusters ([#1428](https://github.com/pingcap/tiup/pull/1428), [@iosmanthus](https://github.com/iosmanthus)) - Fix a typo in help message of `tiup-playground` ([#1429](https://github.com/pingcap/tiup/pull/1429), [@ekexium](https://github.com/ekexium)) - Fix TiFlash nodes not handled correctly in some commands ([#1431](https://github.com/pingcap/tiup/pull/1431), [@lucklove](https://github.com/lucklove)) - Fix jemalloc config for TiKV nodes ([#1435](https://github.com/pingcap/tiup/pull/1435), [@9547](https://github.com/9547)) - Fix the issue that slow log is not placed under `log_dir` ([#1441](https://github.com/pingcap/tiup/pull/1441), [@lucklove](https://github.com/lucklove)) ### Improvements - Update default alertmanager config template to avoid confusing ([#1425](https://github.com/pingcap/tiup/pull/1425) [#1426](https://github.com/pingcap/tiup/pull/1426), [@lucklove](https://github.com/lucklove)) - Increase default timeout of transferring leader in upgrade progress ([#1434](https://github.com/pingcap/tiup/pull/1434), [@AstroProfundis](https://github.com/AstroProfundis)) - Update dependencies ([#1433](https://github.com/pingcap/tiup/pull/1433), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.5.1] 2021-06-11 ### Fix - Fix the issue that some 
versions of TiCDC node may fail to start in `tiup-cluster` ([#1421](https://github.com/pingcap/tiup/pull/1421), [@JinLingChristopher](https://github.com/JinLingChristopher)) ## [1.5.0] 2021-06-09 ### New Features - Show more information in `display` subcommand of `tiup-cluster` - Add an `--uptime` argument to show time since the last state change of process ([#1231](https://github.com/pingcap/tiup/pull/1231), [@9547](https://github.com/9547)) - Show deploy user in `display` output and adjust formats ([#1390](https://github.com/pingcap/tiup/pull/1390) [#1409](https://github.com/pingcap/tiup/pull/1409), [@AstroProfundis](https://github.com/AstroProfundis)) - Add JSON output for `display` subcommand of `tiup-cluster` ([#1358](https://github.com/pingcap/tiup/pull/1358), [@dveeden](https://github.com/dveeden)) - Add double confirmation for `scale-out` subcommand in `tiup-cluster` to let users be aware of global configs being used ([#1309](https://github.com/pingcap/tiup/pull/1309), [@AstroProfundis](https://github.com/AstroProfundis)) - Support deploying pure TiKV cluster with `--mode tikv-slim` in `playground` ([#1333](https://github.com/pingcap/tiup/pull/1333), [@iosmanthus](https://github.com/iosmanthus); [#1365](https://github.com/pingcap/tiup/pull/1365), [@tisonkun](https://github.com/tisonkun)) - Support data dir settings for TiCDC in `tiup-cluster` ([#1372](https://github.com/pingcap/tiup/pull/1372), [@JinLingChristopher](https://github.com/JinLingChristopher)) - Support change of `GCTTL` and `TZ` configs for TiCDC in `tiup-cluster` ([#1380](https://github.com/pingcap/tiup/pull/1380), [@amyangfei](https://github.com/amyangfei)) - Add a local deployment template for `tiup-cluster` ([#1404](https://github.com/pingcap/tiup/pull/1404), [@kolbe](https://github.com/kolbe)) - Support using dot (`.`) in cluster name ([#1412](https://github.com/pingcap/tiup/pull/1412), [@9547](https://github.com/9547)) ### Fixes - Fix a variety of typos 
([#1306](https://github.com/pingcap/tiup/pull/1306), [@kolbe](https://github.com/kolbe)) - Fix non-common speed units shown in downloading progress ([#1312](https://github.com/pingcap/tiup/pull/1312), [@dveeden](https://github.com/dveeden)) - Fix the issue that it may panic when user try to list expired component ([#1391](https://github.com/pingcap/tiup/pull/1391), [@lucklove](https://github.com/lucklove)) - Fix the issue that tikv not upgraded on error increasing schedule limit ([#1401](https://github.com/pingcap/tiup/pull/1401), [@AstroProfundis](https://github.com/AstroProfundis)) ### Improvements - Support specifying node counts in tests ([#1251](https://github.com/pingcap/tiup/pull/1251), [@9547](https://github.com/9547)) - Add double confirmation for `reload`, `patch` and `rename` subcommands in `tiup-cluster` ([#1263](https://github.com/pingcap/tiup/pull/1263), [@9547](https://github.com/9547)) - Add ability to list available make targets for developers ([#1277](https://github.com/pingcap/tiup/pull/1277), [@rkazak](https://github.com/rkazak)) - Update links in doc/dev/README.md file ([#1296](https://github.com/pingcap/tiup/pull/1296), [@mjonss](https://github.com/mjonss)) - Improve handling of latest versions in `mirror clone` subcommand ([#1313](https://github.com/pingcap/tiup/pull/1313), [@dveeden](https://github.com/dveeden)) - Add check for dependencies before downloading package in installation script ([#1348](https://github.com/pingcap/tiup/pull/1348), [@AstroProfundis](https://github.com/AstroProfundis)) - Simplified the handling of configs imported from TiDB-Ansible ([#1350](https://github.com/pingcap/tiup/pull/1350), [@lucklove](https://github.com/lucklove)) - Implement native scp downloading ([#1382](https://github.com/pingcap/tiup/pull/1382), [@AstroProfundis](https://github.com/AstroProfundis)) - Update and fix dependencies ([#1362](https://github.com/pingcap/tiup/pull/1362), [@AstroProfundis](https://github.com/AstroProfundis); 
[#1407](https://github.com/pingcap/tiup/pull/1407), [@dveeden](https://github.com/dveeden)) ## [1.4.4] 2021.05.26 ### Fixes - Fix the issue that upgrade process may fail if the PD node is not available for longer than normal after restart ([#1359](https://github.com/pingcap/tiup/pull/1359), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix incorrect `MALLOC_CONF` value for TiKV node, set `prof_active` to `false` ([#1361](https://github.com/pingcap/tiup/pull/1361) [#1369](https://github.com/pingcap/tiup/pull/1369), [@YangKeao](https://github.com/YangKeao)) - Risk of this issue: Generating prof data for TiKV node with `prof_active=true` may cause high CPU systime usage in some circumstances, users need to regenerate startup scripts for TiKV nodes with `tiup cluster reload -R tikv` to make the update applied - Fix the issue that the global `log_dir` not generated correctly for absolute paths ([#1376](https://github.com/pingcap/tiup/pull/1376), [@lucklove](https://github.com/lucklove)) - Fix the issue that `display` command may report label mismatch warning if `placement-rule` is enabled ([#1378](https://github.com/pingcap/tiup/pull/1378), [@lucklove](https://github.com/lucklove)) - Fix the issue that SELinux setting is incorrect when `tiup-cluster` tries to disable it with `check --apply` ([#1383](https://github.com/pingcap/tiup/pull/1383), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix the issue that when scaling out instance on a host imported from `tidb-ansible`, the process may report error about monitor directory conflict ([#1386](https://github.com/pingcap/tiup/pull/1386), [@lucklove](https://github.com/lucklove)) ### Improvements - Allow scale in cluster when there is no TiSpark master node but have worker node in the topology ([#1363](https://github.com/pingcap/tiup/pull/1363), [@AstroProfundis](https://github.com/AstroProfundis)) - Make port check error message more clear to users ([#1367](https://github.com/pingcap/tiup/pull/1367), 
[@JinLingChristopher](https://github.com/JinLingChristopher)) ## [1.4.3] 2021.05.13 ### Fixes - Fix OS check for RHEL in `tiup-cluster` ([#1336](https://github.com/pingcap/tiup/pull/1336), [@AstroProfundis](https://github.com/AstroProfundis)) - Check for command depends before downloading packages in install script ([#1348](https://github.com/pingcap/tiup/pull/1348), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix the issue that install script downloads an old TiUP package ([#1349](https://github.com/pingcap/tiup/pull/1349), [@lucklove](https://github.com/lucklove)) - Fix the issue that drainer node imported from TiDB-Ansible may have incorrect `data_dir` ([#1346](https://github.com/pingcap/tiup/pull/1346), [@AstroProfundis](https://github.com/AstroProfundis)) ### Improvements - Optimize some subcommands of `tiup mirror` ([#1331](https://github.com/pingcap/tiup/pull/1331), [@AstroProfundis](https://github.com/AstroProfundis)) - Set proper User-Agent for requests downloading manifests and files from remote ([#1342](https://github.com/pingcap/tiup/pull/1342), [@AstroProfundis](https://github.com/AstroProfundis)) - Add basic telemetry report for `tiup` and `playground` ([#1341](https://github.com/pingcap/tiup/pull/1341) [#1353](https://github.com/pingcap/tiup/pull/1353), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.4.2] 2021.04.26 ### Fixes - Send meta output from `tiup` to `stderr` to not to mix with output of components ([#1298](https://github.com/pingcap/tiup/pull/1298), [@dveeden](https://github.com/dveeden)) - Update confusing version selection examples in help message of `playground` ([#1318](https://github.com/pingcap/tiup/pull/1318), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix the issue that `tiup mirror clone` command does exclude yanked component correctly ([#1321](https://github.com/pingcap/tiup/pull/1321), [@lucklove](https://github.com/lucklove)) ### Improvements - Adjust output messages and operation processes 
of `tiup mirror` command ([#1302](https://github.com/pingcap/tiup/pull/1302), [@AstroProfundis](https://github.com/AstroProfundis)) - Add `tiup mirror show` subcommand to display current mirror address in use ([#1317](https://github.com/pingcap/tiup/pull/1317), [@baurine](https://github.com/baurine)) - Optimize error handling if `root.json` fails to load ([#1303](https://github.com/pingcap/tiup/pull/1303), [@AstroProfundis](https://github.com/AstroProfundis)) - Update MySQL client connection example in `playground` ([#1323](https://github.com/pingcap/tiup/pull/1323), [@tangenta](https://github.com/tangenta)) - Adjust data and fields report via telemetry ([#1327](https://github.com/pingcap/tiup/pull/1327), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.4.1] 2021.04.07 ### Fixes - Fix pprof failing for TiKV in playground ([#1272](https://github.com/pingcap/tiup/pull/1272), [@hicqu](https://github.com/hicqu)) - Fix the issue that TiFlash node may be failed to restart in playground ([#1280](https://github.com/pingcap/tiup/pull/1280), [@lucklove](https://github.com/lucklove)) - Fix the issue that `binlog_enable` is not imported from tidb-ansible correctly ([#1261](https://github.com/pingcap/tiup/pull/1261), [@lucklove](https://github.com/lucklove)) - Fix directory conflict check error for TiDB and DM clusters imported from ansible deployment ([#1273](https://github.com/pingcap/tiup/pull/1273), [@lucklove](https://github.com/lucklove)) - Fix compatibility issue during upgrade for PD v3.x ([#1274](https://github.com/pingcap/tiup/pull/1274), [@lucklove](https://github.com/lucklove)) - Fix failure of parsing very long audit log in replay for tiup-cluster ([#1259](https://github.com/pingcap/tiup/pull/1259), [@lucklove](https://github.com/lucklove)) - Fix log dir path of Grafana for tiup-cluster ([#1276](https://github.com/pingcap/tiup/pull/1276), [@rkazak](https://github.com/rkazak)) - Fix config check error when the cluster was deployed with an legacy nightly 
version in tiup-cluster ([#1281](https://github.com/pingcap/tiup/pull/1281), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix error when using nightly version while the actual component is not available in repo ([#1294](https://github.com/pingcap/tiup/pull/1294), [@lucklove](https://github.com/lucklove)) ### Improvements - Refine PD scaling script rendering to optimize the code ([#1253](https://github.com/pingcap/tiup/pull/1253), [@9547](https://github.com/9547)) - Start PD and DM master nodes sequentially in ([#1262](https://github.com/pingcap/tiup/pull/1262), [@9547](https://github.com/9547)) - Properly follow the ignore config check argument in reload for tiup-cluster ([#1265](https://github.com/pingcap/tiup/pull/1265), [@9547](https://github.com/9547)) ## [1.4.0] 2021.03.31 ### New Features - EXPERIMENTAL: Add support of Apple M1 devices ([#1122](https://github.com/pingcap/tiup/issues/1122), [@terasum](https://github.com/terasum) [@AstroProfundis](https://github.com/AstroProfundis) [@sunxiaoguang](https://github.com/sunxiaoguang)) - Playground may not fully work as some components don't yet have packages for `darwin-arm64` released - Not displaying dashboard address if it's "none" or "auto" ([#1054](https://github.com/pingcap/tiup/pull/1054), [@9547](https://github.com/9547)) - Support filtering nodes and roles in `check` subcommand of tiup-cluster ([#1030](https://github.com/pingcap/tiup/pull/1030), [@AstroProfundis](https://github.com/AstroProfundis)) - Support retry of failed operations from where it broke with `replay` command of tiup-cluster and tiup-dm ([#1069](https://github.com/pingcap/tiup/pull/1069) [#1157](https://github.com/pingcap/tiup/pull/1157), [@lucklove](https://github.com/lucklove)) - Support upgrade and patch a stopped TiDB / DM cluster ([#1096](https://github.com/pingcap/tiup/pull/1096), [@lucklove](https://github.com/lucklove)) - Support setting global custom values for topology of tiup-cluster 
([#1098](https://github.com/pingcap/tiup/pull/1098), [@lucklove](https://github.com/lucklove)) - Support custom `root_url` and anonymous login for Grafana in tiup-cluster ([#1085](https://github.com/pingcap/tiup/pull/1085), [@mianhk](https://github.com/mianhk)) - Support remote read and remote write for Prometheus node in tiup-cluster ([#1070](https://github.com/pingcap/tiup/pull/1070), [@XSHui](https://github.com/XSHui)) - Support custom external AlertManager target for Prometheus node in tiup-cluster ([#1149](https://github.com/pingcap/tiup/pull/1149), [@lucklove](https://github.com/lucklove)) - Support force reinstallation of already installed component ([#1145](https://github.com/pingcap/tiup/pull/1145), [@9547](https://github.com/9547)) - Add `--force` and retain data options to tiup-dm ([#1080](https://github.com/pingcap/tiup/pull/1080), [@9547](https://github.com/9547)) - Add `enable`/`disable` subcommands to tiup-dm ([#1114](https://github.com/pingcap/tiup/pull/1114), [@9547](https://github.com/9547)) - Add `template` subcommand to tiup-cluster to print pre-defined topology templates ([#1156](https://github.com/pingcap/tiup/pull/1156), [@lucklove](https://github.com/lucklove)) - Add `--version` option to `display` subcommand of tiup-cluster to print the cluster version ([#1207](https://github.com/pingcap/tiup/pull/1207), [@AstroProfundis](https://github.com/AstroProfundis)) - Allow value type change when editing topology with `edit-config` subcommand of tiup-cluster ([#1050](https://github.com/pingcap/tiup/pull/1050), [@AstroProfundis](https://github.com/AstroProfundis)) ### Fixes - Not allowing deployment if the input topology file is empty ([#994](https://github.com/pingcap/tiup/pull/994), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix data dir setting for Prometheus ([#1040](https://github.com/pingcap/tiup/pull/1040), [@9547](https://github.com/9547)) - Fix the issue that pre-defined Prometheus rules may be missing if a custom `rule_dir` is 
set ([#1073](https://github.com/pingcap/tiup/pull/1073), [@9547](https://github.com/9547)) - Fix the issue that config files of Prometheus and Grafana are not checked before start ([#1074](https://github.com/pingcap/tiup/pull/1074), [@9547](https://github.com/9547)) - Fix the issue that cluster name is not validated for some operations ([#1177](https://github.com/pingcap/tiup/pull/1177), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix the issue that tiup-cluster reloads a cluster even if the config may contain errors ([#1183](https://github.com/pingcap/tiup/pull/1183), [@9547](https://github.com/9547)) - Fix the issue that `publish` command may fail when uploading files without retry ([#1174](https://github.com/pingcap/tiup/pull/1174) [#1202](https://github.com/pingcap/tiup/pull/1202), [@AstroProfundis](https://github.com/AstroProfundis); [#1167](https://github.com/pingcap/tiup/pull/1163), [@lucklove](https://github.com/lucklove)) - Fix the issue that newly added TiFlash nodes may fail to start during `scale-out` in tiup-cluster ([#1227](https://github.com/pingcap/tiup/pull/1227), [@9547](https://github.com/9547)) - Fix incorrect cluster name in alert messages ([#1238](https://github.com/pingcap/tiup/pull/1238), [@9547](https://github.com/9547)) - Fix the issue that blackbox_exporter may not collecting ping metrics correctly ([#1250](https://github.com/pingcap/tiup/pull/1250), [@STRRL](https://github.com/STRRL)) ### Improvements - Reduce jitter during upgrade process of TiDB cluster - Make sure PD node is online and serving before upgrading the next one ([#1032](https://github.com/pingcap/tiup/pull/1032), [@HunDunDM](https://github.com/HunDunDM)) - Upgrade PD leader node after upgrading other PD nodes ([#1086](https://github.com/pingcap/tiup/pull/1086), [@AstroProfundis](https://github.com/AstroProfundis)) - Increase schedule limit during upgrade of TiKV nodes ([#1661](https://github.com/pingcap/tiup/pull/1161), 
[@AstroProfundis](https://github.com/AstroProfundis)) - Add check to validate if all regions are healthy ([#1126](https://github.com/pingcap/tiup/pull/1126), [@AstroProfundis](https://github.com/AstroProfundis)) - Only reload Prometheus configs when needed ([#989](https://github.com/pingcap/tiup/pull/989), [@9547](https://github.com/9547)) - Show default option on prompted input messages ([#1132](https://github.com/pingcap/tiup/pull/1132) [#1134](https://github.com/pingcap/tiup/pull/1134), [@wangbinhe3db](https://github.com/wangbinhe3db)) - Include user's input in error message if prompted challenge didn't pass ([#1104](https://github.com/pingcap/tiup/pull/1104), [@AstroProfundis](https://github.com/AstroProfundis)) - Check for `data_dir` and `log_dir` overlap before deploying a cluster ([#1093](https://github.com/pingcap/tiup/pull/1093), [@9547](https://github.com/9547)) - Improve checking rules in `tiup cluster check` command ([#1099](https://github.com/pingcap/tiup/pull/1099) [#1107](https://github.com/pingcap/tiup/pull/1107), [@AstroProfundis](https://github.com/AstroProfundis); [#1118](https://github.com/pingcap/tiup/pull/1118) [#1124](https://github.com/pingcap/tiup/pull/1124), [@9547](https://github.com/9547)) - Refine `list` and `display` command for tiup-cluster ([#1139](https://github.com/pingcap/tiup/pull/1139), [@baurine](https://github.com/baurine)) - Mark patched nodes in `display` output of tiup-cluster and tiup-dm ([#1125](https://github.com/pingcap/tiup/pull/1125), [@AstroProfundis](https://github.com/AstroProfundis)) - Ignore `users.*` settings for TiFlash if the cluster version is later than v4.0.12 and v5.0.0-rc ([#1211](https://github.com/pingcap/tiup/pull/1211), [@JaySon-Huang](https://github.com/JaySon-Huang)) - Cache `timestamp` manifest in memory to reduce network requests ([#1212](https://github.com/pingcap/tiup/pull/1212), [@lucklove](https://github.com/lucklove)) - Upgrade toolchain to Go 1.16 
([#1151](https://github.com/pingcap/tiup/pull/1151) [#1153](https://github.com/pingcap/tiup/pull/1153) [#1130](https://github.com/pingcap/tiup/pull/1130), [@AstroProfundis](https://github.com/AstroProfundis)) - Use GitHub Actions to build and release TiUP components ([#1158](https://github.com/pingcap/tiup/pull/1158), [@AstroProfundis](https://github.com/AstroProfundis)) - Remove deprecated `v0manifest` support, TiUP version before v1.0.0 may not be able to download latest packages anymore ([#906](https://github.com/pingcap/tiup/issues/906)) ## [1.3.7] 2021.03.25 ### Fixes - Fix the issue that metrics of tiflash-server instance may not collected correctly ([#1083](https://github.com/pingcap/tiup/pull/1083), [@yuzhibotao](https://github.com/yuzhibotao)) - Fix the issue that tiup-cluster disables monitoring services unexpectedly ([#1088](https://github.com/pingcap/tiup/pull/1088), [@lucklove](https://github.com/lucklove)) - Fix wrong dashboard name for lightning in Grafana after renaming a cluster with tiup-cluster ([#1196](https://github.com/pingcap/tiup/pull/1196), [@9547](https://github.com/9547)) - Fix the issue that tiup-cluster `prune` command may try to generate config for removed nodes ([#1237](https://github.com/pingcap/tiup/pull/1237), [@lucklove](https://github.com/lucklove)) ## [1.3.6] 2021.03.19 ### Fixes - Fix the issue that can't deploy arm64 binary with offline mirror ([#1229](https://github.com/pingcap/tiup/pull/1229), [@lucklove](https://github.com/lucklove)) ## [1.3.5] 2021.03.11 ### Fixes - Fix the issue that old nighlty may cause error ([#1198](https://github.com/pingcap/tiup/pull/1198), [@lucklove](https://github.com/lucklove)) ## [1.3.4] 2021.03.05 ### Fixes - Fix the issue that tiup-cluster can't generate prometheus config ([#1185](https://github.com/pingcap/tiup/pull/1185), [@lucklove](https://github.com/lucklove)) - Fix the issue that tiup may choose yanked version if it's already installed 
([#1191](https://github.com/pingcap/tiup/pull/1191), [@lucklove](https://github.com/lucklove)) ## [1.3.3] 2021.03.04 ### Fixes - Fix the issue that tiup will hang forever when reloading a stopped cluster ([#1044](https://github.com/pingcap/tiup/pull/1044), [@9547](https://github.com/9547)) - Fix the issue that `tiup mirror merge` does not work on official offline package ([#1121](https://github.com/pingcap/tiup/pull/1121), [@lucklove](https://github.com/lucklove)) - Fix the issue that there may be no retry when download component failed ([#1137](https://github.com/pingcap/tiup/pull/1137), [@lucklove](https://github.com/lucklove)) - Fix the issue that PD dashboard does not report grafana address in playground ([#1142](https://github.com/pingcap/tiup/pull/1142), [@9547](https://github.com/9547)) - Fix the issue that the default selected version may be a preprelease version ([#1128](https://github.com/pingcap/tiup/pull/1128), [@lucklove](https://github.com/lucklove)) - Fix the issue that the error message is confusing when the patched tar is not correct ([#1175](https://github.com/pingcap/tiup/pull/1175), [@lucklove](https://github.com/lucklove)) ### Improvements - Add darwin-arm64 not support hint in install script ([#1123](https://github.com/pingcap/tiup/pull/1123), [@terasum](https://github.com/terasum)) - Improve playground welcome information for connecting TiDB ([#1133](https://github.com/pingcap/tiup/pull/1133), [@dveeden](https://github.com/dveeden)) - Bind latest stable grafana and prometheus in DM deploying ([#1129](https://github.com/pingcap/tiup/pull/1129), [@lucklove](https://github.com/lucklove)) - Use the advertised host instead of 0.0.0.0 for tiup-playground ([#1152](https://github.com/pingcap/tiup/pull/1152), [@9547](https://github.com/9547)) - Check tarball checksum on tiup-server when publish component ([#1163](https://github.com/pingcap/tiup/pull/1163), [@lucklove](https://github.com/lucklove)) ## [1.3.2] 2021.01.29 ### Fixes - Fix the issue that 
the grafana and alertmanager target not set in prometheus.yaml ([#1041](https://github.com/pingcap/tiup/pull/1041), [@9547](https://github.com/9547)) - Fix the issue that grafana deployed by tiup-dm missing home.json ([#1056](https://github.com/pingcap/tiup/pull/1056), [@lucklove](https://github.com/lucklove)) - Fix the issue that the expires of cloned mirror is shourened after publish component to it ([#1051](https://github.com/pingcap/tiup/pull/1051), [@lucklove](https://github.com/lucklove)) - Fix the issue that tiup-cluster may remove wrong paths for imported cluster on scale-in ([#1068](https://github.com/pingcap/tiup/pull/1068), [@AstroProfundis](https://github.com/AstroProfundis)) - Risk of this issue: If an imported cluster has deploy dir ending with `/`, and sub dirs as `//sub`, it could results to delete wrong paths on scale-in - Fix the issue that imported `*_exporter` has wrong binary path ([#1101](https://github.com/pingcap/tiup/pull/1101), [@AstroProfundis](https://github.com/AstroProfundis)) ### Improvements - Apply more strict check on tar.gz file for `patch` command: check if the entry is an executable file ([#1091](https://github.com/pingcap/tiup/pull/1091), [@lucklove](https://github.com/lucklove)) ## [1.3.1] 2020.12.31 ### Fixes - Workaround the issue that store IDs in PDs may not monotonically assigned ([#1011](https://github.com/pingcap/tiup/pull/1011), [@AstroProfundis](https://github.com/AstroProfundis)) - Currently, the ID allocator is guaranteed not to allocate duplicated IDs, but when PD leader changes multiple times, the IDs may not be monotonic - For tiup < v1.2.1, the command `tiup cluster display` may delete store (without confirm) by mistake due to this issue (high risk) - For tiup >= v1.2.1 and <= v1.3.0, the command `tiup cluster display` may display `up` stores as `tombstone`, and encourages the user to delete them with the command `tiup cluster prune` (medium risk) - Fix the issue that the `cluster check` always fail on thp check 
even though the thp is disabled ([#1005](https://github.com/pingcap/tiup/pull/1005), [@lucklove](https://github.com/lucklove)) - Fix the issue that the command `tiup mirror merge -h` outputs wrong usage ([#1008](https://github.com/pingcap/tiup/pull/1008), [@lucklove](https://github.com/lucklove)) - The syntax of this command should be `tiup mirror merge [mirror-dir-N]` but it outputs `tiup mirror merge [mirror-dir-N]` - Fix the issue that prometheus doesn't collect drainer metrics ([#1012](https://github.com/pingcap/tiup/pull/1012), [@SE-Bin](https://github.com/SE-Bin)) ### Improvements - Reduce display duration when PD nodes encounter network problems and dropping packages ([#986](https://github.com/pingcap/tiup/pull/986), [@9547](https://github.com/9547)) - cluster, dm: support version input without leading 'v' ([#1009](https://github.com/pingcap/tiup/pull/1009), [@AstroProfundis](https://github.com/AstroProfundis)) - Add a warning to explain that we will stop the cluster before clean logs ([#1029](https://github.com/pingcap/tiup/pull/1029), [@lucklove](https://github.com/lucklove)) - When a user try to clean logs with the command `tiup cluster clean --logs`, he may expect that the cluster is still running during the clean operation - The actual situation is not what he expect, which may surprise the user (risk) ## [1.3.0] 2020.12.17 ### New Features - Modify TiFlash's query memory limit from 10GB to 0(unlimited) in playground cluster ([#907](https://github.com/pingcap/tiup/pull/907), [@LittleFall](https://github.com/LittleFall)) - Import configuration into topology meta when migrating a cluster from Ansible ([#766](https://github.com/pingcap/tiup/pull/766), [@yuzhibotao](https://github.com/yuzhibotao)) - Before, we stored imported ansible config in ansible-imported-configs which is hidden for users, in this release, we merge the configs into meta.yaml so that the user can see the config with the command `tiup cluster edit` - Enhance the `tiup mirror` command 
([#860](https://github.com/pingcap/tiup/pull/860), [@lucklove](https://github.com/lucklove)) - **Support merge two or more mirrors into one** - Support publish component to local mirror besides remote mirror - Support add component owner to local mirror - Partially support deploy cluster with hostname besides ip address (**EXPERIMENTAL**) ([#948](https://github.com/pingcap/tiup/pull/948),[#949](https://github.com/pingcap/tiup/pull/949), [@fln](https://github.com/fln)) - Not usable for production, as there would be issue if a hostname resolves to a new IP address after deployment - Support setting custom timeout for waiting instances up in playground-cluster ([#968](https://github.com/pingcap/tiup/pull/968), [@unbyte](https://github.com/unbyte)) - Support check and disable THP in `tiup cluster check` ([#964](https://github.com/pingcap/tiup/pull/964), [@anywhy](https://github.com/anywhy)) - Support sign remote manifest and rotate root.json ([#967](https://github.com/pingcap/tiup/pull/967), [@lucklove](https://github.com/lucklove)) ### Fixes - Fixed the issue that the public key created by TiUP was not removed after the cluster was destroyed ([#910](https://github.com/pingcap/tiup/pull/910), [@9547](https://github.com/9547)) - Fix the issue that user defined grafana username and password not imported from tidb-ansible cluster correctly ([#937](https://github.com/pingcap/tiup/pull/937), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix the issue that playground cluster not quitting components with correct order: TiDB -> TiKV -> PD ([#933](https://github.com/pingcap/tiup/pull/933), [@unbyte](https://github.com/unbyte)) - Fix the issue that TiKV reports wrong advertise address when `--status-addr` is set to a wildcard address like `0.0.0.0` ([#951](https://github.com/pingcap/tiup/pull/951), [@lucklove](https://github.com/lucklove)) - Fix the issue that Prometheus doesn't reload target after scale-in action ([#958](https://github.com/pingcap/tiup/pull/958), 
[@9547](https://github.com/9547)) - Fix the issue that the config file for TiFlash missing in playground cluster ([#969](https://github.com/pingcap/tiup/pull/969), [@unbyte](https://github.com/unbyte)) - Fix Tilfash startup failed without stderr output when numa is enabled but numactl cannot be found ([#984](https://github.com/pingcap/tiup/pull/984), [@lucklove](https://github.com/lucklove)) - Fix the issue that the deployment environment fail to copy config file when zsh is configured ([#982](https://github.com/pingcap/tiup/pull/982), [@9547](https://github.com/9547)) ### Improvements - Enable memory buddyinfo monitoring on node_exporter to collect exposes statistics of memory fragments ([#904](https://github.com/pingcap/tiup/pull/904), [@9547](https://github.com/9547)) - Move error logs dumped by tiup-dm and tiup-cluster to `${TIUP_HOME}/logs` ([#908](https://github.com/pingcap/tiup/pull/908), [@9547](https://github.com/9547)) - Allow run pure TiKV (without TiDB) cluster in playground cluster ([#926](https://github.com/pingcap/tiup/pull/926), [@sticnarf](https://github.com/sticnarf)) - Add confirm stage for upgrade action ([#963](https://github.com/pingcap/tiup/pull/963), [@Win-Man](https://github.com/Win-Man)) - Omit debug log from console output in tiup-cluster ([#977](https://github.com/pingcap/tiup/pull/977), [@AstroProfundis](https://github.com/AstroProfundis)) - Prompt list of paths to be deleted before processing in the clean action of tiup-cluster ([#981](https://github.com/pingcap/tiup/pull/981), [#993](https://github.com/pingcap/tiup/pull/993), [@AstroProfundis](https://github.com/AstroProfundis)) - Make error message of monitor port conflict more readable ([#966](https://github.com/pingcap/tiup/pull/966), [@JaySon-Huang](https://github.com/JaySon-Huang)) ## [1.2.5] 2020.11.27 ### Fixes - Fix the issue that can't operate the cluster which have tispark workers without tispark master ([#924](https://github.com/pingcap/tiup/pull/924), 
[@AstroProfundis](https://github.com/AstroProfundis)) - Root cause: once the tispark master been removed from the cluster, any later action will be reject by TiUP - Fix: make it possible for broken clusters to fix no tispark master error by scaling out a new tispark master node - Fix the issue that it report `pump node id not found` while drainer node id not found ([#925](https://github.com/pingcap/tiup/pull/925), [@lucklove](https://github.com/lucklove)) ### Improvements - Support deploy TiFlash on multi-disks with "storage" configurations since v4.0.9 ([#931](https://github.com/pingcap/tiup/pull/931), [#938](https://github.com/pingcap/tiup/pull/938), [@JaySon-Huang](https://github.com/JaySon-Huang)) - Check duplicated pd_servers.name in the topology before truly deploy the cluster ([#922](https://github.com/pingcap/tiup/pull/922), [@anywhy](https://github.com/anywhy)) ## [1.2.4] 2020.11.19 ### Fixes - Fix the issue that Pump & Drainer has different node id between tidb-ansible and TiUP ([#903](https://github.com/pingcap/tiup/pull/903), [@lucklove](https://github.com/lucklove)) - For the cluster imported from tidb-ansible, if the pump or drainer is restarted, it will start with a new node id - Risk of this issue: binlog may not work correctly after restart pump or drainer - Fix the issue that audit log may get lost in some special case ([#879](https://github.com/pingcap/tiup/pull/879), [#882](https://github.com/pingcap/tiup/pull/882), [@9547](https://github.com/9547)) - If the user execute two commands one follows the other, and the second one quit in 1 second, the audit log of the first command will be overwirten by the second one - Risk caused by this issue: some audit logs may get lost in above case - Fix the issue that new component deployed with `tiup cluster scale-out` doesn't auto start when rebooting ([#905](https://github.com/pingcap/tiup/pull/905), [@9547](https://github.com/9547)) - Risk caused by this issue: the cluster may be unavailable after 
rebooting - Fix the issue that data directory of TiFlash is not deleted if multiple data directories are specified ([#871](https://github.com/pingcap/tiup/pull/871), [@9547](https://github.com/9547)) - Fix the issue that `node_exporter` and `blackbox_exporter` not cleaned up after scale-in all instances on specified host ([#857](https://github.com/pingcap/tiup/pull/857), [@9547](https://github.com/9547)) - Fix the issue that the patch command will fail when try to patch dm cluster ([#884](https://github.com/pingcap/tiup/pull/884), [@lucklove](https://github.com/lucklove)) - Fix the issue that the bench component report `Error 1105: client has multi-statement capability disabled` ([#887](https://github.com/pingcap/tiup/pull/887), [@mahjonp](https://github.com/mahjonp)) - Fix the issue that the TiSpark node can't be upgraded ([#901](https://github.com/pingcap/tiup/pull/901), [@lucklove](https://github.com/lucklove)) - Fix the issue that playground cluster can't start TiFlash with newest nightly PD ([#902](https://github.com/pingcap/tiup/pull/902), [@lucklove](https://github.com/lucklove)) ### Improvements - Ignore no tispark master error when listing clusters since the master node may be remove by `scale-in --force` ([#920](https://github.com/pingcap/tiup/pull/920), [@AstroProfundis](https://github.com/AstroProfundis)) ## [1.2.3] 2020.10.30 ### Fixes - Fix misleading warning message in the display command ([#869](https://github.com/pingcap/tiup/pull/869), [@lucklove](https://github.com/lucklove)) ## [1.2.1] 2020.10.23 ### Improvements - Introduce a more safe way to cleanup tombstone nodes ([#858](https://github.com/pingcap/tiup/pull/858), [@lucklove](https://github.com/lucklove)) - When an user `scale-in` a TiKV server, it's data is not deleted until the user executes a `display` command, it's risky because there is no choice for user to confirm - We have add a `prune` command for the cleanup stage, the display command will not cleanup tombstone instance any more - 
Skip auto-start the cluster before the scale-out action because there may be some damaged instance that can't be started ([#848](https://github.com/pingcap/tiup/pull/848), [@lucklove](https://github.com/lucklove)) - In this version, the user should make sure the cluster is working correctly by themselves before executing `scale-out` - Introduce a more graceful way to check TiKV labels ([#843](https://github.com/pingcap/tiup/pull/843), [@lucklove](https://github.com/lucklove)) - Before this change, we check TiKV labels from the config files of TiKV and PD servers, however, servers imported from tidb-ansible deployment don't store latest labels in local config, this causes inaccurate label information - After this we will fetch PD and TiKV labels with PD api in display command ### Fixes - Fix the issue that there is datarace when concurrent save the same file ([#836](https://github.com/pingcap/tiup/pull/836), [@9547](https://github.com/9547)) - We found that while the cluster deployed with TLS supported, the ca.crt file was saved multi times in parallel, this may lead to the ca.crt file to be left empty - The influence of this issue is that the tiup client may not communicate with the cluster - Fix the issue that files copied by TiUP may have different mode with origin files ([#844](https://github.com/pingcap/tiup/pull/844), [@lucklove](https://github.com/lucklove)) - Fix the issue that the tiup script not updated after `scale-in` PD ([#824](https://github.com/pingcap/tiup/pull/824), [@9547](https://github.com/9547)) ## [1.2.0] 2020.09.29 ### New Features - Support tiup env sub command ([#788](https://github.com/pingcap/tiup/pull/788), [@lucklove](https://github.com/lucklove)) - Support TiCDC for playground ([#777](https://github.com/pingcap/tiup/pull/777), [@leoppro](https://github.com/leoppro)) - Support limiting core dump size ([#817](https://github.com/pingcap/tiup/pull/817), [@lucklove](https://github.com/lucklove)) - Support using latest Spark and TiSpark 
release ([#779](https://github.com/pingcap/tiup/pull/779), [@lucklove](https://github.com/lucklove)) - Support new cdc arguments `gc-ttl` and `tz` ([#770](https://github.com/pingcap/tiup/pull/770), [@lichunzhu](https://github.com/lichunzhu)) - Support specifying custom ssh and scp path ([#734](https://github.com/pingcap/tiup/pull/734), [@9547](https://github.com/9547)) ### Fixes - Fix `tiup update --self` results to tiup's binary file deleted ([#816](https://github.com/pingcap/tiup/pull/816), [@lucklove](https://github.com/lucklove)) - Fix per-host custom port for drainer not handled correctly on importing ([#806](https://github.com/pingcap/tiup/pull/806), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix the issue that help message is inconsistent ([#758](https://github.com/pingcap/tiup/pull/758), [@9547](https://github.com/9547)) - Fix the issue that dm not applying config files correctly ([#810](https://github.com/pingcap/tiup/pull/810), [@lucklove](https://github.com/lucklove)) - Fix the issue that playground display wrong TiDB number in error message ([#821](https://github.com/pingcap/tiup/pull/821), [@SwanSpouse](https://github.com/SwanSpouse)) ### Improvements - Automatically check if TiKV's label is set ([#800](https://github.com/pingcap/tiup/pull/800), [@lucklove](https://github.com/lucklove)) - Download component with stream mode to avoid memory explosion ([#755](https://github.com/pingcap/tiup/pull/755), [@9547](https://github.com/9547)) - Save and display absolute path for deploy directory, data directory and log directory to avoid confusion ([#822](https://github.com/pingcap/tiup/pull/822), [@lucklove](https://github.com/lucklove)) - Redirect DM stdout to log files ([#815](https://github.com/pingcap/tiup/pull/815), [@csuzhangxc](https://github.com/csuzhangxc)) - Skip download nightly package when it exists ([#793](https://github.com/pingcap/tiup/pull/793), [@lucklove](https://github.com/lucklove)) ## [1.1.2] 2020.09.11 ### Fixes - Fix the 
issue that TiKV store leader count is not correct ([#762](https://github.com/pingcap/tiup/pull/762)) - Fix the issue that TiFlash's data is not clean up ([#768](https://github.com/pingcap/tiup/pull/768)) - Fix the issue that `tiup cluster deploy --help` display wrong help message ([#758](https://github.com/pingcap/tiup/pull/758)) - Fix the issue that tiup-playground can't display and scale ([#749](https://github.com/pingcap/tiup/pull/749)) ## [1.1.1] 2020.09.01 ### Fixes - Remove the username `root` in sudo command [#731](https://github.com/pingcap/tiup/issues/731) - Transfer the default alertmanager.yml if the local config file not specified [#735](https://github.com/pingcap/tiup/issues/735) - Only remove corresponed config files in InitConfig for monitor service in case it's a shared directory [#736](https://github.com/pingcap/tiup/issues/736) ## [1.1.0] 2020.08.28 ### New Features - [experimental] Support specifying customized configuration files for monitor components ([#712](https://github.com/pingcap/tiup/pull/712), [@lucklove](https://github.com/lucklove)) - Support specifying user group or skipping creating a user in the deploy and scale-out stage ([#678](https://github.com/pingcap/tiup/pull/678), [@lucklove](https://github.com/lucklove)) - to specify the group: https://github.com/pingcap/tiup/blob/master/examples/topology.example.yaml#L7 - to skip creating the user: `tiup cluster deploy/scale-out --skip-create-user xxx` - [experimental] Support rename cluster by the command `tiup cluster rename ` ([#671](https://github.com/pingcap/tiup/pull/671), [@lucklove](https://github.com/lucklove)) > Grafana stores some data related to cluster name to its grafana.db. The rename action will NOT delete them. So there may be some useless panel need to be deleted manually. 
- [experimental] Introduce `tiup cluster clean` command ([#644](https://github.com/pingcap/tiup/pull/644), [@lucklove](https://github.com/lucklove)): - Cleanup all data in specified cluster: `tiup cluster clean ${cluster-name} --data` - Cleanup all logs in specified cluster: `tiup cluster clean ${cluster-name} --log` - Cleanup all logs and data in specified cluster: `tiup cluster clean ${cluster-name} --all` - Cleanup all logs and data in specified cluster, excepting the Prometheus service: `tiup cluster clean ${cluster-name} --all --ignore-role Prometheus` - Cleanup all logs and data in specified cluster, expecting the node `172.16.13.11:9000`: `tiup cluster clean ${cluster-name} --all --ignore-node 172.16.13.11:9000` - Cleanup all logs and data in specified cluster, expecting the host `172.16.13.11`: `tiup cluster clean ${cluster-name} --all --ignore-node 172.16.13.12` - Support skipping evicting store when there is only 1 TiKV ([#662](https://github.com/pingcap/tiup/pull/662), [@lucklove](https://github.com/lucklove)) - Support importing clusters with binlog enabled ([#652](https://github.com/pingcap/tiup/pull/652), [@AstroProfundis](https://github.com/AstroProfundis)) - Support yml source format with tiup-dm ([#655](https://github.com/pingcap/tiup/pull/655), [@july2993](https://github.com/july2993)) - Support detecting port conflict of monitoring agents between different clusters ([#623](https://github.com/pingcap/tiup/pull/623), [@AstroProfundis](https://github.com/AstroProfundis)) ### Fixes - Set correct `deploy_dir` of monitoring agents when importing ansible deployed clusters ([#704](https://github.com/pingcap/tiup/pull/704), [@AstroProfundis](https://github.com/AstroProfundis)) - Fix the issue that `tiup update --self` may make root.json invalid with offline mirror ([#659](https://github.com/pingcap/tiup/pull/659), [@lucklove](https://github.com/lucklove)) ### Improvements - Add `advertise-status-addr` for TiFlash to support host name 
([#676](https://github.com/pingcap/tiup/pull/676), [@birdstorm](https://github.com/birdstorm)) ## [1.0.9] 2020.08.03 ### tiup * Clone with yanked version [#602](https://github.com/pingcap/tiup/pull/602) * Support yank a single version on client side [#602](https://github.com/pingcap/tiup/pull/605) * Support bash and zsh completion [#606](https://github.com/pingcap/tiup/pull/606) * Handle yanked version when update components [#635](https://github.com/pingcap/tiup/pull/635) ### tiup-cluster * Validate topology changes after edit-config [#609](https://github.com/pingcap/tiup/pull/609) * Allow continue editing when new topology has errors [#624](https://github.com/pingcap/tiup/pull/624) * Fix wrongly set data_dir of TiFlash when import from ansible [#612](https://github.com/pingcap/tiup/pull/612) * Support native ssh client [#615](https://github.com/pingcap/tiup/pull/615) * Support refresh configuration only when reload [#625](https://github.com/pingcap/tiup/pull/625) * Apply config file on scaled pd server [#627](https://github.com/pingcap/tiup/pull/627) * Refresh monitor configs on reload [#630](https://github.com/pingcap/tiup/pull/630) * Support posix style argument for user flag [#631](https://github.com/pingcap/tiup/pull/631) * Fix PD config incompatible when retrieving dashboard address [#638](https://github.com/pingcap/tiup/pull/638) * Integrate tispark [#531](https://github.com/pingcap/tiup/pull/531) [#621](https://github.com/pingcap/tiup/pull/621) tiup-1.16.3/CONTRIBUTING.md000066400000000000000000000004131505422223000150050ustar00rootroot00000000000000# Contribution Guide See the [Contribution Guide](https://github.com/pingcap/community/blob/master/contributors/README.md) in the [community](https://github.com/pingcap/community) repo. The content of [tiup.io](https://tiup.io) webpage is at the `gh-pages` branch. 
tiup-1.16.3/LICENSE000066400000000000000000000261351505422223000135720ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
tiup-1.16.3/Makefile000066400000000000000000000106441505422223000142230ustar00rootroot00000000000000.PHONY: components server targets .DEFAULT_GOAL := default LANG=C MAKEOVERRIDES = targets: @printf "%-30s %s\n" "Target" "Description" @printf "%-30s %s\n" "------" "-----------" @make -pqR : 2>/dev/null \ | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' \ | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' \ | sort \ | xargs -I _ sh -c 'printf "%-30s " _; make _ -nB | (grep "^# Target:" || echo "") | tail -1 | sed "s/^# Target: //g"' REPO := github.com/pingcap/tiup GOOS := $(if $(GOOS),$(GOOS),$(shell go env GOOS)) GOARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH)) GOENV := GO111MODULE=on CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) GO := $(GOENV) go GOBUILD := $(GO) build $(BUILD_FLAGS) GOINSTALL := $(GO) install GOTEST := GO111MODULE=on CGO_ENABLED=1 go test -p 3 SHELL := /usr/bin/env bash _COMMIT := $(shell git describe --no-match --always --dirty) _GITREF := $(shell git rev-parse --abbrev-ref HEAD) COMMIT := $(if $(COMMIT),$(COMMIT),$(_COMMIT)) GITREF := $(if $(GITREF),$(GITREF),$(_GITREF)) LDFLAGS := -w -s LDFLAGS += -X "$(REPO)/pkg/version.GitHash=$(COMMIT)" LDFLAGS += -X "$(REPO)/pkg/version.GitRef=$(GITREF)" LDFLAGS += $(EXTRA_LDFLAGS) FILES := $$(find . -name "*.go") FAILPOINT_ENABLE := $$(go tool github.com/pingcap/failpoint/failpoint-ctl enable) FAILPOINT_DISABLE := $$(go tool github.com/pingcap/failpoint/failpoint-ctl disable) default: check build @# Target: run the checks and then build. 
include ./tests/Makefile # Build TiUP and all components build: tiup components @# Target: build tiup and all it's components components: playground client cluster dm server @# Target: build the playground, client, cluster, dm and server components tiup: @# Target: build the tiup driver $(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/tiup playground: @# Target: build tiup-playground component $(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/tiup-playground ./components/playground client: @# Target: build the tiup-client component $(MAKE) -C components/client $(MAKECMDGOALS) cluster: @# Target: build the tiup-cluster component $(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/tiup-cluster ./components/cluster dm: @# Target: build the tiup-dm component $(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/tiup-dm ./components/dm ctl: @# Target: build the tiup-ctl component $(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/tiup-ctl ./components/ctl server: @# Target: build the tiup-server component $(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/tiup-server ./server check: fmt lint tidy check-static vet @# Target: run all checkers. (fmt, lint, tidy, check-static and vet) $(MAKE) -C components/client ${MAKECMDGOALS} check-static: tools/bin/golangci-lint @# Target: run the golangci-lint static check tool tools/bin/golangci-lint run --config tools/check/golangci.yaml ./... --timeout=3m --fix lint: @# Target: run the lint checker revive @echo "linting" @go tool github.com/mgechev/revive -formatter friendly -config tools/check/revive.toml $(FILES) vet: @# Target: run the go vet tool $(GO) vet ./... 
tidy: @# Target: run tidy check @echo "go mod tidy" ./tools/check/check-tidy.sh clean: @# Target: run the build cleanup steps @rm -rf bin @rm -rf cover @rm -rf tests/*/{bin/*.test,logs,cover/*.out} test: failpoint-enable run-tests failpoint-disable @# Target: run tests with failpoint enabled $(MAKE) -C components/client ${MAKECMDGOALS} # TODO: refactor integration tests base on v1 manifest # run-tests: unit-test integration_test run-tests: unit-test @# Target: run the unit tests # Run tests unit-test: @# Target: run the code coverage test phase mkdir -p cover TIUP_HOME=$(shell pwd)/tests/tiup $(GOTEST) ./... -covermode=count -coverprofile cover/cov.unit-test.out race: failpoint-enable @# Target: run race check with failpoint enabled TIUP_HOME=$(shell pwd)/tests/tiup $(GOTEST) -race ./... || { $(FAILPOINT_DISABLE); exit 1; } @$(FAILPOINT_DISABLE) failpoint-enable: @# Target: enable failpoint @$(FAILPOINT_ENABLE) failpoint-disable: @# Target: disable failpoint @$(FAILPOINT_DISABLE) fmt: @# Target: run the go formatter utility @echo "gofmt (simplify)" @gofmt -s -l -w $(FILES) 2>&1 @echo "goimports (if installed)" $(shell goimports -w $(FILES) 2>/dev/null) tools/bin/golangci-lint: @# Target: pull in specific version of golangci-lint (v1.64.8) curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./tools/bin v1.64.8 tiup-1.16.3/OWNERS000066400000000000000000000004151505422223000135160ustar00rootroot00000000000000# See the OWNERS docs at https://go.k8s.io/owners approvers: # tiup-maintainers: - bb7133 - xhebox # tiup-committers: 0 members emeritus_approvers: - lonng - lucklove - AstroProfundis reviewers: # tiup-reviewers: - nexustar - breezewish - srstack - iguoyr - kaaaaaaang tiup-1.16.3/README.md000066400000000000000000000042701505422223000140400ustar00rootroot00000000000000[![LICENSE](https://img.shields.io/github/license/pingcap/tidb.svg)](https://github.com/pingcap/tiup/blob/master/LICENSE) 
[![Language](https://img.shields.io/badge/Language-Go-blue.svg)](https://golang.org/) [![Go Report Card](https://goreportcard.com/badge/github.com/pingcap/tiup)](https://goreportcard.com/badge/github.com/pingcap/tiup) [![Coverage Status](https://codecov.io/gh/pingcap/tiup/branch/master/graph/badge.svg)](https://codecov.io/gh/pingcap/tiup/) [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fpingcap%2Ftiup.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fpingcap%2Ftiup?ref=badge_shield) # What is TiUP `tiup` is a tool to download and install [TiDB](https://docs.pingcap.com/tidb/stable/overview) components. ## Documentation - [English](https://docs.pingcap.com/tidb/stable/tiup-documentation-guide) - [简体中文](https://docs.pingcap.com/zh/tidb/stable/tiup-documentation-guide) ## Blog - [English](https://pingcap.com/blog/) - [简体中文](https://pingcap.com/blog-cn/) ## Installation ```sh curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh ``` ## Quick start ### Run playground ```sh tiup playground ``` ### Install components ```sh tiup install tidb tikv pd ``` ### Uninstall components ```sh tiup uninstall tidb tikv pd ``` ### Update components ```sh tiup update --all ``` ## Usage After installing `tiup`, you can use it to install binaries of TiDB components and create clusters. See our [doc](doc/user/README.md) for more information on how to use TiUP. ## Contributing to TiUP Contributions of code, tests, docs, and bug reports are welcome! To get started, take a look at our [open issues](https://github.com/pingcap/tiup/issues). For docs on how to build, test, and run TiUP, see our [dev docs](doc/dev/README.md). See also the [Contribution Guide](https://github.com/pingcap/community/blob/master/contributors/README.md) in PingCAP's [community](https://github.com/pingcap/community) repo. 
## License [![FOSSA Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fpingcap%2Ftiup.svg?type=large)](https://app.fossa.com/projects/git%2Bgithub.com%2Fpingcap%2Ftiup?ref=badge_large) tiup-1.16.3/SECURITY.md000066400000000000000000000027651505422223000143610ustar00rootroot00000000000000# Security Vulnerability Disclosure and Response Process TiDB is a fast-growing open source database. To ensure its security, a security vulnerability disclosure and response process is adopted. The primary goal of this process is to reduce the total exposure time of users to publicly known vulnerabilities. To quickly fix vulnerabilities of TiDB products, the security team is responsible for the entire vulnerability management process, including internal communication and external disclosure. If you find a vulnerability or encounter a security incident involving vulnerabilities of TiDB products, please report it as soon as possible to the TiDB security team (security@tidb.io). Please kindly help provide as much vulnerability information as possible in the following format: - Issue title*: - Overview*: - Affected components and version number*: - CVE number (if any): - Vulnerability verification process*: - Contact information*: The asterisk (*) indicates the required field. # Response Time The TiDB security team will confirm the vulnerabilities and contact you within 2 working days after your submission. We will publicly thank you after fixing the security vulnerability. To avoid negative impact, please keep the vulnerability confidential until we fix it. We would appreciate it if you could obey the following code of conduct: The vulnerability will not be disclosed until TiDB releases a patch for it. The details of the vulnerability, for example, exploits code, will not be disclosed. 
tiup-1.16.3/cmd/000077500000000000000000000000001505422223000133215ustar00rootroot00000000000000tiup-1.16.3/cmd/clean.go000066400000000000000000000051331505422223000147340ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "fmt" "os" "path/filepath" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" gops "github.com/shirou/gopsutil/process" "github.com/spf13/cobra" ) func newCleanCmd() *cobra.Command { var all bool cmd := &cobra.Command{ Use: "clean ", Short: "Clean the data of instantiated components", RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() if len(args) == 0 && !all { return cmd.Help() } return cleanData(env, args, all) }, } cmd.Flags().BoolVar(&all, "all", false, "Clean all data of instantiated components") return cmd } func cleanData(env *environment.Environment, names []string, all bool) error { dataDir := env.LocalPath(localdata.DataParentDir) if utils.IsNotExist(dataDir) { return nil } dirs, err := os.ReadDir(dataDir) if err != nil { return err } clean := set.NewStringSet(names...) 
for _, dir := range dirs { if !dir.IsDir() { continue } if !all && !clean.Exist(dir.Name()) { continue } process, err := env.Profile().ReadMetaFile(dir.Name()) if err != nil { return err } if process == nil { fmt.Fprintf(os.Stderr, "Can't clean directory due to missing meta file: %s\n", filepath.Join(dataDir, dir.Name())) continue } if p, err := gops.NewProcess(int32(process.Pid)); err == nil { pName, err := p.Name() if err != nil { fmt.Fprintf(os.Stderr, "Failed to get process info for `%s`, pid: %v\n", process.Component, process.Pid) } else { if pName != "tiup-playground" { fmt.Printf("Process name mismatch (`%s` != `tiup-playground`, not killing it.\n", pName) } else { fmt.Printf("Kill instance of `%s`, pid: %v\n", process.Component, process.Pid) if err := p.Kill(); err != nil { return err } } } } if err := os.RemoveAll(filepath.Join(dataDir, dir.Name())); err != nil { return err } fmt.Printf("Clean instance of `%s`, directory: %s\n", process.Component, process.Dir) } return nil } tiup-1.16.3/cmd/env.go000066400000000000000000000026571505422223000144520ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package cmd import ( "fmt" "os" "github.com/pingcap/tiup/pkg/localdata" "github.com/spf13/cobra" ) var envList = []string{ localdata.EnvNameHome, localdata.EnvNameSSHPassPrompt, localdata.EnvNameSSHPath, localdata.EnvNameSCPPath, localdata.EnvNameKeepSourceTarget, localdata.EnvNameMirrorSyncScript, localdata.EnvNameLogPath, localdata.EnvNameDebug, } func newEnvCmd() *cobra.Command { cmd := &cobra.Command{ Use: "env [name1...N]", Short: "Show the list of system environment variable that related to TiUP", RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { showEnvList(true, envList...) return nil } showEnvList(false, args...) return nil }, } return cmd } func showEnvList(withKey bool, names ...string) { for _, name := range names { if withKey { fmt.Printf("%s=\"%s\"\n", name, os.Getenv(name)) } else { fmt.Printf("%s\n", os.Getenv(name)) } } } tiup-1.16.3/cmd/history.go000066400000000000000000000054771505422223000153660ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package cmd import ( "encoding/json" "fmt" "strconv" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/tui" "github.com/spf13/cobra" ) // newHistoryCmd history func newHistoryCmd() *cobra.Command { rows := 100 var displayMode string var all bool cmd := &cobra.Command{ Use: "history ", Short: "Display the historical execution record of TiUP, displays 100 lines by default", RunE: func(cmd *cobra.Command, args []string) error { if len(args) > 0 { r, err := strconv.Atoi(args[0]) if err != nil { return fmt.Errorf("%s: numeric argument required", args[0]) } rows = r } env := environment.GlobalEnv() rows, err := env.GetHistory(rows, all) if err != nil { return err } if displayMode == "json" { for _, r := range rows { rBytes, err := json.Marshal(r) if err != nil { continue } fmt.Println(string(rBytes)) } return nil } var table [][]string table = append(table, []string{"Date", "Command", "Code"}) for _, r := range rows { table = append(table, []string{ r.Date.Format("2006-01-02T15:04:05"), r.Command, strconv.Itoa(r.Code), }) } tui.PrintTable(table, true) fmt.Printf("history log save path: %s\n", env.LocalPath(environment.HistoryDir)) return nil }, } cmd.Flags().StringVar(&displayMode, "format", "default", "The format of output, available values are [default, json]") cmd.Flags().BoolVar(&all, "all", false, "Display all execution history") cmd.AddCommand(newHistoryCleanupCmd()) return cmd } func newHistoryCleanupCmd() *cobra.Command { var retainDays int var all bool var skipConfirm bool cmd := &cobra.Command{ Use: "cleanup", Short: "delete all execution history", RunE: func(cmd *cobra.Command, args []string) error { if retainDays < 0 { return errors.Errorf("retain-days cannot be less than 0") } if all { retainDays = 0 } env := environment.GlobalEnv() return env.DeleteHistory(retainDays, skipConfirm) }, } cmd.Flags().IntVar(&retainDays, "retain-days", 60, "Number of days to keep history for deletion") cmd.Flags().BoolVar(&all, 
"all", false, "Delete all history") cmd.Flags().BoolVarP(&skipConfirm, "yes", "y", false, "Skip all confirmations and assumes 'yes'") return cmd } tiup-1.16.3/cmd/install.go000066400000000000000000000027621505422223000153250ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "github.com/pingcap/tiup/pkg/environment" "github.com/spf13/cobra" ) func newInstallCmd() *cobra.Command { var force bool cmd := &cobra.Command{ Use: "install [:version] [component2...N]", Short: "Install a specific version of a component", Long: `Install a specific version of a component. The component can be specified by or :. The latest stable version will be installed if there is no version specified. You can install multiple components at once, or install multiple versions of the same component: tiup install tidb:v3.0.5 tikv pd tiup install tidb:v3.0.5 tidb:v3.0.8 tikv:v3.0.9`, RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() if len(args) == 0 { return cmd.Help() } return env.UpdateComponents(args, false, force) }, } cmd.Flags().BoolVar(&force, "force", false, "If the specified version was already installed, force a reinstallation") return cmd } tiup-1.16.3/cmd/link.go000066400000000000000000000021041505422223000146020ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "github.com/pingcap/tiup/pkg/environment" "github.com/spf13/cobra" ) func newLinkCmd() *cobra.Command { cmd := &cobra.Command{ Use: "link [:version]", Short: "Link component binary to $TIUP_HOME/bin/", Long: `[experimental] Link component binary to $TIUP_HOME/bin/`, RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() if len(args) != 1 { return cmd.Help() } component, version := environment.ParseCompVersion(args[0]) return env.Link(component, version) }, } return cmd } tiup-1.16.3/cmd/list.go000066400000000000000000000142741505422223000146330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package cmd import ( "fmt" "os" "sort" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" "golang.org/x/mod/semver" ) type listOptions struct { installedOnly bool verbose bool showAll bool } func newListCmd() *cobra.Command { var opt listOptions cmd := &cobra.Command{ Use: "list [component]", Short: "List the available TiDB components or versions", Long: `List the available TiDB components if you don't specify any component name, or list the available versions of a specific component. Display a list of local caches by default. Use the --installed flag to hide components or versions which have not been installed. # List all installed components tiup list --installed # List all installed versions of TiDB tiup list tidb --installed`, SilenceUsage: true, SilenceErrors: true, RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() switch len(args) { case 0: result, err := showComponentList(env, opt) result.print() return err case 1: result, err := showComponentVersions(env, args[0], opt) result.print() return err default: return cmd.Help() } }, } cmd.Flags().BoolVar(&opt.installedOnly, "installed", false, "List installed components only.") cmd.Flags().BoolVar(&opt.verbose, "verbose", false, "Show detailed component information.") cmd.Flags().BoolVar(&opt.showAll, "all", false, "Show all components include hidden ones.") return cmd } type listResult struct { header string cmpTable [][]string } func (lr *listResult) print() { if lr == nil { return } fmt.Printf("%s", lr.header) tui.PrintTable(lr.cmpTable, true) } func showComponentList(env *environment.Environment, opt listOptions) (*listResult, error) { if !opt.installedOnly { err := env.V1Repository().UpdateComponentManifests() if err != nil { tui.ColorWarningMsg.Fprint(os.Stderr, 
"Warn: Update component manifest failed, err_msg=[", err.Error(), "]\n") } } installed, err := env.Profile().InstalledComponents() if err != nil { return nil, err } var cmpTable [][]string if opt.verbose { cmpTable = append(cmpTable, []string{"Name", "Owner", "Installed", "Platforms", "Description"}) } else { cmpTable = append(cmpTable, []string{"Name", "Owner", "Description"}) } index := v1manifest.Index{} _, exists, err := env.V1Repository().LocalLoadManifest(&index) if err != nil { return nil, err } if !exists { return nil, errors.Errorf("unreachable: index.json not found in manifests directory") } localComponents := set.NewStringSet(installed...) compIDs := []string{} components := index.ComponentList() for id := range components { compIDs = append(compIDs, id) } sort.Strings(compIDs) for _, id := range compIDs { comp := components[id] if opt.installedOnly && !localComponents.Exist(id) { continue } if (!opt.installedOnly && !opt.showAll) && comp.Hidden { continue } filename := v1manifest.ComponentManifestFilename(id) manifest, err := env.V1Repository().LocalLoadComponentManifest(&comp, filename) if err != nil { return nil, err } if manifest == nil { continue } if opt.verbose { installStatus := "" if localComponents.Exist(id) { versions, err := env.Profile().InstalledVersions(id) if err != nil { return nil, err } installStatus = strings.Join(versions, ",") } var platforms []string for p := range manifest.Platforms { platforms = append(platforms, p) } cmpTable = append(cmpTable, []string{ id, comp.Owner, installStatus, strings.Join(platforms, ","), manifest.Description, }) } else { cmpTable = append(cmpTable, []string{ id, comp.Owner, manifest.Description, }) } } return &listResult{ header: "Available components:\n", cmpTable: cmpTable, }, nil } func showComponentVersions(env *environment.Environment, component string, opt listOptions) (*listResult, error) { var comp *v1manifest.Component var err error if opt.installedOnly { comp, err = 
env.V1Repository().LocalComponentManifest(component, false) } else { comp, err = env.V1Repository().GetComponentManifest(component, false) } if err != nil { return nil, errors.Annotate(err, "failed to fetch component") } versions, err := env.Profile().InstalledVersions(component) if err != nil { return nil, err } installed := set.NewStringSet(versions...) var cmpTable [][]string cmpTable = append(cmpTable, []string{"Version", "Installed", "Release", "Platforms"}) platforms := make(map[string][]string) released := make(map[string]string) for plat := range comp.Platforms { versions := comp.VersionList(plat) for ver, verinfo := range versions { if ver == comp.Nightly { key := fmt.Sprintf("%s -> %s", utils.NightlyVersionAlias, comp.Nightly) platforms[key] = append(platforms[key], plat) released[key] = verinfo.Released } platforms[ver] = append(platforms[ver], plat) released[ver] = verinfo.Released } } verList := []string{} for v := range platforms { verList = append(verList, v) } sort.Slice(verList, func(p, q int) bool { return semver.Compare(verList[p], verList[q]) < 0 }) for _, v := range verList { installStatus := "" if installed.Exist(v) { installStatus = "YES" } else if opt.installedOnly { continue } cmpTable = append(cmpTable, []string{v, installStatus, released[v], strings.Join(platforms[v], ",")}) } return &listResult{ header: fmt.Sprintf("Available versions for %s:\n", component), cmpTable: cmpTable, }, nil } tiup-1.16.3/cmd/mirror.go000066400000000000000000000663231505422223000151740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "bytes" "context" "encoding/json" "errors" "fmt" "os" "path" "path/filepath" "runtime" "sort" "strings" "time" "github.com/fatih/color" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/repository/model" ru "github.com/pingcap/tiup/pkg/repository/utils" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/server/rotate" "github.com/spf13/cobra" "github.com/spf13/pflag" ) func newMirrorCmd() *cobra.Command { cmd := &cobra.Command{ Use: "mirror ", Short: "Manage a repository mirror for TiUP components", Long: `The 'mirror' command is used to manage a component repository for TiUP, you can use it to create a private repository, or to add new component to an existing repository. The repository can be used either online or offline. 
It also provides some useful utilities to help managing keys, users and versions of components or the repository itself.`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return cmd.Help() } return nil }, } cmd.AddCommand( newMirrorInitCmd(), newMirrorSignCmd(), newMirrorGenkeyCmd(), newMirrorCloneCmd(), newMirrorMergeCmd(), newMirrorPublishCmd(), newMirrorShowCmd(), newMirrorSetCmd(), newMirrorModifyCmd(), newMirrorRenewCmd(), newMirrorGrantCmd(), newMirrorRotateCmd(), newTransferOwnerCmd(), ) return cmd } // the `mirror sign` sub command func newMirrorSignCmd() *cobra.Command { privPath := "" timeout := 10 cmd := &cobra.Command{ Use: "sign ", Short: "Add signatures to a manifest file", Long: fmt.Sprintf("Add signatures to a manifest file; if no key file is specified, ~/.tiup/keys/%s will be used", localdata.DefaultPrivateKeyName), RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() if len(args) < 1 { return cmd.Help() } if privPath == "" { privPath = env.Profile().Path(localdata.KeyInfoParentDir, localdata.DefaultPrivateKeyName) } privKey, err := loadPrivKey(privPath) if err != nil { return err } if strings.HasPrefix(args[0], "http") { client := utils.NewHTTPClient(time.Duration(timeout)*time.Second, nil) data, err := client.Get(context.TODO(), args[0]) if err != nil { return err } if data, err = v1manifest.SignManifestData(data, privKey); err != nil { return err } if _, err = client.Post(context.TODO(), args[0], bytes.NewBuffer(data)); err != nil { return err } return nil } data, err := os.ReadFile(args[0]) if err != nil { return perrs.Annotatef(err, "open manifest file %s", args[0]) } if data, err = v1manifest.SignManifestData(data, privKey); err != nil { return err } if err = utils.WriteFile(args[0], data, 0664); err != nil { return perrs.Annotatef(err, "write manifest file %s", args[0]) } return nil }, } cmd.Flags().StringVarP(&privPath, "key", "k", "", "Specify the private key path") 
cmd.Flags().IntVarP(&timeout, "timeout", "", timeout, "Specify the timeout when access the network") return cmd } // the `mirror show` sub command func newMirrorShowCmd() *cobra.Command { cmd := &cobra.Command{ Use: "show", Short: "Show mirror address", Long: `Show current mirror address`, RunE: func(cmd *cobra.Command, args []string) error { fmt.Println(environment.Mirror()) return nil }, } return cmd } // the `mirror set` sub command func newMirrorSetCmd() *cobra.Command { var ( root string reset bool silent bool ) cmd := &cobra.Command{ Use: "set ", Short: "Set mirror address", Long: `Set mirror address, the address could be an URL or a path to the repository directory. Relative paths will not be expanded, so absolute paths are recommended. The root manifest in $TIUP_HOME will be replaced with the one in given repository automatically.`, RunE: func(cmd *cobra.Command, args []string) error { if !reset && len(args) != 1 { return cmd.Help() } var addr string if reset { addr = repository.DefaultMirror } else { addr = args[0] } // expand relative path if !strings.HasPrefix(addr, "http") { var err error addr, err = filepath.Abs(addr) if err != nil { return err } } profile := localdata.InitProfile() if err := profile.ResetMirror(addr, root); err != nil { log.Errorf("Failed to set mirror: %s\n", err.Error()) return err } if !silent { fmt.Printf("Successfully set mirror to %s\n", addr) } return nil }, } cmd.Flags().StringVarP(&root, "root", "r", root, "Specify the path of `root.json`") cmd.Flags().BoolVar(&reset, "reset", false, "Reset mirror to use the default address.") cmd.Flags().BoolVar(&silent, "silent", false, "Skip non-warning messages.") return cmd } // the `mirror grant` sub command func newMirrorGrantCmd() *cobra.Command { name := "" privPath := "" cmd := &cobra.Command{ Use: "grant ", Short: "grant a new owner", Long: "grant a new owner to current mirror", RunE: func(cmd *cobra.Command, args []string) error { if len(args) < 1 { return cmd.Help() } id := 
args[0] if name == "" { fmt.Printf("No --name is given, using %s as default\n", id) name = id } // the privPath can point to a public key because the Public method of KeyInfo works on both priv and pub keys privKey, err := loadPrivKey(privPath) if err != nil { return err } pubKey, err := privKey.Public() if err != nil { return err } keyID, err := pubKey.ID() if err != nil { return err } env := environment.GlobalEnv() err = env.V1Repository().Mirror().Grant(id, name, pubKey) if err == nil { log.Infof("Granted new owner %s(%s) with public key %s.", id, name, keyID) } return err }, } cmd.Flags().StringVarP(&name, "name", "n", "", "Specify the name of the owner, default: id of the owner") cmd.Flags().StringVarP(&privPath, "key", "k", "", "Specify the path to the private or public key of the owner") return cmd } // the `mirror modify` sub command func newMirrorModifyCmd() *cobra.Command { var privPath string desc := "" standalone := false hidden := false yanked := false cmd := &cobra.Command{ Use: "modify [:version] [flags]", Short: "Modify published component", Long: "Modify component attributes (hidden, standalone, yanked)", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } component := args[0] env := environment.GlobalEnv() comp, ver := environment.ParseCompVersion(component) m, err := env.V1Repository().GetComponentManifest(comp, true) if err != nil { return err } v1manifest.RenewManifest(m, time.Now()) if desc != "" { m.Description = desc } flagSet := set.NewStringSet() cmd.Flags().Visit(func(f *pflag.Flag) { flagSet.Insert(f.Name) }) publishInfo := &model.PublishInfo{} if ver == "" { if flagSet.Exist("standalone") { publishInfo.Stand = &standalone } if flagSet.Exist("hide") { publishInfo.Hide = &hidden } if flagSet.Exist("yank") { publishInfo.Yank = &yanked } } else if flagSet.Exist("yank") { if ver.IsNightly() { return perrs.New("nightly version can't be yanked") } for p := range m.Platforms { vi, ok := 
m.Platforms[p][ver.String()] if !ok { continue } vi.Yanked = yanked m.Platforms[p][ver.String()] = vi } } manifest, err := sign(privPath, m) if err != nil { return err } return env.V1Repository().Mirror().Publish(manifest, publishInfo) }, } cmd.Flags().StringVarP(&privPath, "key", "k", "", "private key path") cmd.Flags().StringVarP(&desc, "desc", "", desc, "description of the component") cmd.Flags().BoolVarP(&standalone, "standalone", "", standalone, "can this component run directly") cmd.Flags().BoolVarP(&hidden, "hide", "", hidden, "is this component visible in list") cmd.Flags().BoolVarP(&yanked, "yank", "", yanked, "is this component deprecated") return cmd } // the `mirror renew` sub command func newMirrorRenewCmd() *cobra.Command { var privPath string var days int cmd := &cobra.Command{ Use: "renew [flags]", Short: "Renew the manifest of a published component.", Long: "Renew the manifest of a published component, bump version and extend its expire time.", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } component := args[0] env := environment.GlobalEnv() comp, _ := environment.ParseCompVersion(component) m, err := env.V1Repository().GetComponentManifest(comp, true) if err != nil { // ignore manifest expiration error if !v1manifest.IsExpirationError(perrs.Cause(err)) { return err } fmt.Printf("Ignoring expiration error: %s", err) } if m == nil { return errors.New("got nil manifest") } if days > 0 { v1manifest.RenewManifest(m, time.Now(), time.Hour*24*time.Duration(days)) } else { v1manifest.RenewManifest(m, time.Now()) } manifest, err := sign(privPath, m) if err != nil { return err } return env.V1Repository().Mirror().Publish(manifest, &model.PublishInfo{}) }, } cmd.Flags().StringVarP(&privPath, "key", "k", "", "private key path") cmd.Flags().IntVar(&days, "days", 0, "after how many days the manifest expires, 0 means builtin default values of manifests") return cmd } // the `mirror transfer-owner` sub command func 
newTransferOwnerCmd() *cobra.Command { addr := "0.0.0.0:8080" cmd := &cobra.Command{ Use: "transfer-owner ", Short: "Transfer component to another owner", Long: "Transfer component to another owner, this must be done on the server.", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } component := args[0] newOwnerName := args[1] env := environment.GlobalEnv() // read current manifests index, err := env.V1Repository().FetchIndexManifest() if err != nil { return err } newOwner, found := index.Owners[newOwnerName] if !found { return fmt.Errorf("new owner '%s' is not in the available owner list", newOwnerName) } m, err := env.V1Repository().GetComponentManifest(component, true) if err != nil { return err } v1manifest.RenewManifest(m, time.Now()) // validate new owner's authorization newCompManifest, err := rotate.ServeComponent(addr, &newOwner, m) if err != nil { return err } // update owner info return env.V1Repository().Mirror().Publish(newCompManifest, &model.PublishInfo{ Owner: newOwnerName, }) }, } cmd.Flags().StringVarP(&addr, "addr", "", addr, "listen address:port when starting the temp server for signing") return cmd } // the `mirror rotate` sub command func newMirrorRotateCmd() *cobra.Command { addr := "0.0.0.0:8080" keyDir := "" cmd := &cobra.Command{ Use: "rotate", Short: "Rotate root.json", Long: "Rotate root.json make it possible to modify root.json", RunE: func(cmd *cobra.Command, args []string) error { e, err := environment.InitEnv(repoOpts, repository.MirrorOptions{KeyDir: keyDir}) if err != nil { if errors.Is(perrs.Cause(err), v1manifest.ErrLoadManifest) { log.Warnf("Please check for root manifest file, you may download one from the repository mirror, or try `tiup mirror set` to force reset it.") } return err } environment.SetGlobalEnv(e) root, err := editLatestRootManifest() if err != nil { return err } manifest, err := rotate.ServeRoot(addr, root) if err != nil { return err } return 
environment.GlobalEnv().V1Repository().Mirror().Rotate(manifest) }, } cmd.Flags().StringVarP(&addr, "addr", "", addr, "listen address:port when starting the temp server for rotating") cmd.Flags().StringVarP(&keyDir, "key-dir", "", keyDir, "specify the directory where stores the private keys") return cmd } func editLatestRootManifest() (*v1manifest.Root, error) { root, err := environment.GlobalEnv().V1Repository().FetchRootManifest() if err != nil { return nil, err } file, err := os.CreateTemp(os.TempDir(), "*.root.json") if err != nil { return nil, perrs.Annotate(err, "create temp file for root.json") } defer file.Close() name := file.Name() encoder := json.NewEncoder(file) encoder.SetIndent("", " ") if err := encoder.Encode(root); err != nil { return nil, perrs.Annotate(err, "encode root.json") } if err := file.Close(); err != nil { return nil, perrs.Annotatef(err, "close %s", name) } if err := utils.OpenFileInEditor(name); err != nil { return nil, err } root = &v1manifest.Root{} file, err = os.Open(name) if err != nil { return nil, perrs.Annotatef(err, "open %s", name) } defer file.Close() if err := json.NewDecoder(file).Decode(root); err != nil { return nil, perrs.Annotatef(err, "decode %s", name) } return root, nil } func loadPrivKey(privPath string) (*v1manifest.KeyInfo, error) { env := environment.GlobalEnv() if privPath == "" { privPath = env.Profile().Path(localdata.KeyInfoParentDir, localdata.DefaultPrivateKeyName) } // Get the private key f, err := os.Open(privPath) if err != nil { return nil, err } defer f.Close() ki := v1manifest.KeyInfo{} if err := json.NewDecoder(f).Decode(&ki); err != nil { return nil, perrs.Annotate(err, "decode key") } return &ki, nil } func loadPrivKeys(keysDir string) (map[string]*v1manifest.KeyInfo, error) { keys := map[string]*v1manifest.KeyInfo{} err := filepath.Walk(keysDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return nil } ki, err := loadPrivKey(path) if err != 
nil { return err } id, err := ki.ID() if err != nil { return err } keys[id] = ki return nil }) if err != nil { return nil, err } return keys, nil } func sign(privPath string, signed v1manifest.ValidManifest) (*v1manifest.Manifest, error) { ki, err := loadPrivKey(privPath) if err != nil { return nil, err } return v1manifest.SignManifest(signed, ki) } // the `mirror publish` sub command func newMirrorPublishCmd() *cobra.Command { var privPath string goos := runtime.GOOS goarch := runtime.GOARCH desc := "" standalone := false hidden := false cmd := &cobra.Command{ Use: "publish ", Short: "Publish a component", Long: "Publish a component to the repository", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 4 { return cmd.Help() } component, version, tarpath, entry := args[0], args[1], args[2], args[3] flagSet := set.NewStringSet() cmd.Flags().Visit(func(f *pflag.Flag) { flagSet.Insert(f.Name) }) if err := validatePlatform(goos, goarch); err != nil { return err } hashes, length, err := ru.HashFile(tarpath) if err != nil { return err } fmt.Printf("uploading %s with %d bytes, sha256: %v ...\n", tarpath, length, hashes[v1manifest.SHA256]) tarfile, err := os.Open(tarpath) if err != nil { return perrs.Annotatef(err, "open tarball: %s", tarpath) } defer tarfile.Close() publishInfo := &model.PublishInfo{ ComponentData: &model.TarInfo{Reader: tarfile, Name: fmt.Sprintf("%s-%s-%s-%s.tar.gz", component, version, goos, goarch)}, } var reqErr error pubErr := utils.Retry(func() error { err := doPublish(component, version, entry, desc, publishInfo, hashes, length, standalone, hidden, privPath, goos, goarch, flagSet, ) if err != nil { // retry if the error is manifest too old or validation failed if err == repository.ErrManifestTooOld || errors.Is(perrs.Cause(err), utils.ErrValidateChecksum) || strings.Contains(err.Error(), "INVALID TARBALL") { fmt.Printf("server returned an error: %s, retry...\n", err) if _, ferr := tarfile.Seek(0, 0); ferr != nil { // reset the 
reader return ferr } return err // return err to trigger next retry } reqErr = err // keep the error info } return nil // return nil to end the retry loop }, utils.RetryOption{ Attempts: 10, Delay: time.Second * 2, Timeout: time.Minute * 10, }) if reqErr != nil { return reqErr } return pubErr }, } cmd.Flags().StringVarP(&privPath, "key", "k", "", "private key path") cmd.Flags().StringVarP(&goos, "os", "", goos, "the target operation system") cmd.Flags().StringVarP(&goarch, "arch", "", goarch, "the target system architecture") cmd.Flags().StringVarP(&desc, "desc", "", desc, "description of the component") cmd.Flags().BoolVarP(&standalone, "standalone", "", standalone, "can this component run directly") cmd.Flags().BoolVarP(&hidden, "hide", "", hidden, "is this component invisible on listing") return cmd } func doPublish( component, version, entry, desc string, publishInfo *model.PublishInfo, hashes map[string]string, length int64, standalone, hidden bool, privPath, goos, goarch string, flagSet set.StringSet, ) error { env := environment.GlobalEnv() env.V1Repository().PurgeTimestamp() m, err := env.V1Repository().GetComponentManifest(component, true) if err != nil { if perrs.Cause(err) != repository.ErrUnknownComponent { return err } fmt.Printf("Creating component %s\n", component) publishInfo.Stand = &standalone publishInfo.Hide = &hidden } else if flagSet.Exist("standalone") || flagSet.Exist("hide") { fmt.Println("This is not a new component, --standalone and --hide flag will be omitted") } m = repository.UpdateManifestForPublish(m, component, version, entry, goos, goarch, desc, v1manifest.FileHash{ Hashes: hashes, Length: uint(length), }) manifest, err := sign(privPath, m) if err != nil { return err } return env.V1Repository().Mirror().Publish(manifest, publishInfo) } func validatePlatform(goos, goarch string) error { // Only support any/any, don't support linux/any, any/amd64 .etc. 
if goos == "any" && goarch == "any" { return nil } switch goos + "/" + goarch { case "linux/amd64", "linux/arm64", "darwin/amd64", "darwin/arm64": return nil default: return perrs.Errorf("platform %s/%s not supported", goos, goarch) } } // the `mirror genkey` sub command func newMirrorGenkeyCmd() *cobra.Command { var ( showPublic bool saveKey bool name string ) cmd := &cobra.Command{ Use: "genkey", Short: "Generate a new key pair", Long: `Generate a new key pair that can be used to sign components.`, RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() privPath := env.Profile().Path(localdata.KeyInfoParentDir, name+".json") keyDir := filepath.Dir(privPath) if utils.IsNotExist(keyDir) { if err := os.Mkdir(keyDir, 0755); err != nil { return perrs.Annotate(err, "create private key dir") } } var ki *v1manifest.KeyInfo var err error if showPublic { ki, err = loadPrivKey(privPath) if err != nil { return err } pki, err := ki.Public() if err != nil { return err } id, err := pki.ID() if err != nil { return err } content, err := json.MarshalIndent(pki, "", "\t") if err != nil { return err } fmt.Printf("KeyID: %s\nKeyContent: \n%s\n", id, string(content)) } else { if utils.IsExist(privPath) { log.Warnf("Warning: private key already exists (%s), skipped", privPath) return nil } ki, err = v1manifest.GenKeyInfo() if err != nil { return err } f, err := os.Create(privPath) if err != nil { return err } defer f.Close() // set private key permission if err = f.Chmod(0600); err != nil { return err } if err := json.NewEncoder(f).Encode(ki); err != nil { return err } fmt.Printf("Private key has been written to %s\n", privPath) } if saveKey { pubKey, err := ki.Public() if err != nil { return err } pubPath, err := v1manifest.SaveKeyInfo(pubKey, "public", "") if err != nil { return err } fmt.Printf("Public key has been written to current working dir: %s\n", pubPath) } return nil }, } cmd.Flags().BoolVarP(&showPublic, "public", "p", showPublic, "Show public 
content") cmd.Flags().BoolVar(&saveKey, "save", false, "Save public key to a file in the current working dir") cmd.Flags().StringVarP(&name, "name", "n", "private", "The file name of the key") return cmd } // the `mirror init` sub command func newMirrorInitCmd() *cobra.Command { var ( keyDir string // Directory to write generated key files ) cmd := &cobra.Command{ Use: "init ", Short: "Initialize an empty repository", Long: `Initialize an empty TiUP repository at given path. The specified path must be an empty directory. If the path does not exist, a new directory will be created.`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } repoPath := args[0] // create the target path if not exist if utils.IsNotExist(repoPath) { var err error log.Infof("Target path \"%s\" does not exist, creating new directory...", repoPath) if err = os.Mkdir(repoPath, 0755); err != nil { return err } } // init requires an empty path to use empty, err := utils.IsEmptyDir(repoPath) if err != nil { return err } if !empty { return perrs.Errorf("the target path '%s' is not an empty directory", repoPath) } if keyDir == "" { keyDir = path.Join(repoPath, "keys") } return initRepo(repoPath, keyDir) }, } cmd.Flags().StringVarP(&keyDir, "key-dir", "k", "", "Path to write the private key files") return cmd } func initRepo(path, keyDir string) error { log.Infof("Initializing empty new repository at \"%s\", private keys will be stored in \"%s\"...", path, keyDir) err := v1manifest.Init(path, keyDir, time.Now().UTC()) if err != nil { log.Errorf("Initializing new repository failed.") return err } log.Infof("New repository initialized at \"%s\", private keys are stored in \"%s\".", path, keyDir) log.Infof("Use `%s` command to set and use the new repository.", color.CyanString("tiup mirror set %s", path)) return nil } // the `mirror merge` sub command func newMirrorMergeCmd() *cobra.Command { cmd := &cobra.Command{ Use: "merge [mirror-dir-N]", Example: ` tiup 
mirror merge tidb-community-v4.0.1 # merge v4.0.1 into current mirror tiup mirror merge tidb-community-v4.0.1 tidb-community-v4.0.2 # merge v4.0.1 and v4.0.2 into current mirror`, Short: "Merge two or more offline mirror", RunE: func(cmd *cobra.Command, args []string) error { if len(args) < 1 { return cmd.Help() } sources := args env := environment.GlobalEnv() baseMirror := env.V1Repository().Mirror() sourceMirrors := []repository.Mirror{} for _, source := range sources { sourceMirror := repository.NewMirror(source, repository.MirrorOptions{}) if err := sourceMirror.Open(); err != nil { return err } defer sourceMirror.Close() sourceMirrors = append(sourceMirrors, sourceMirror) } keys, err := loadPrivKeys(env.Profile().Path(localdata.KeyInfoParentDir)) if err != nil { return err } return repository.MergeMirror(keys, baseMirror, sourceMirrors...) }, } return cmd } // the `mirror clone` sub command func newMirrorCloneCmd() *cobra.Command { var ( options = repository.CloneOptions{Components: map[string]*[]string{}} components []string repo repository.Repository initialized bool ) initMirrorCloneExtraArgs := func(cmd *cobra.Command) error { initialized = true env := environment.GlobalEnv() repo = env.V1Repository() index, err := repo.FetchIndexManifest() if err != nil { return err } if index != nil && len(index.Components) > 0 { for name, comp := range index.Components { if comp.Yanked { continue } components = append(components, name) } } sort.Strings(components) for _, name := range components { options.Components[name] = new([]string) cmd.Flags().StringSliceVar(options.Components[name], name, nil, "Specify the versions for component "+name) } return nil } cmd := &cobra.Command{ Use: "clone [global version]", Example: ` tiup mirror clone /path/to/local --arch amd64,arm64 --os linux,darwin # Specify the architectures and OSs tiup mirror clone /path/to/local --os linux v6.1.0 v5.4.0 # Specify multiple versions tiup mirror clone /path/to/local --full # Build a full local 
mirror tiup mirror clone /path/to/local --tikv v4 --prefix # Specify the version via prefix tiup mirror clone /path/to/local --tidb all --pd all # Download all version for specific component`, Short: "Clone a local mirror from remote mirror and download all selected components", SilenceUsage: true, DisableFlagParsing: true, PreRunE: func(cmd *cobra.Command, args []string) error { return initMirrorCloneExtraArgs(cmd) }, RunE: func(cmd *cobra.Command, args []string) error { cmd.DisableFlagParsing = false err := cmd.ParseFlags(args) if err != nil { return err } args = cmd.Flags().Args() printHelp, _ := cmd.Flags().GetBool("help") if printHelp || len(args) < 1 { return cmd.Help() } if len(components) < 1 { return perrs.New("component list doesn't contain components") } if err = repo.Mirror().Open(); err != nil { return err } defer func() { err = repo.Mirror().Close() if err != nil { log.Errorf("Failed to close mirror: %s\n", err.Error()) } }() // format input versions versionList := make([]string, 0) for _, ver := range args[1:] { v, err := utils.FmtVer(ver) if err != nil { return err } versionList = append(versionList, v) } return repository.CloneMirror(repo, components, args[0], versionList, options) }, } cmd.Flags().SortFlags = false cmd.Flags().BoolVarP(&options.Full, "full", "f", false, "Build a full mirrors repository") cmd.Flags().StringSliceVarP(&options.Archs, "arch", "a", []string{"amd64", "arm64"}, "Specify the downloading architecture") cmd.Flags().StringSliceVarP(&options.OSs, "os", "o", []string{"linux", "darwin"}, "Specify the downloading os") cmd.Flags().BoolVarP(&options.Prefix, "prefix", "", false, "Download the version with matching prefix") cmd.Flags().UintVarP(&options.Jobs, "jobs", "", 1, "Specify the number of concurrent download jobs") originHelpFunc := cmd.HelpFunc() cmd.SetHelpFunc(func(command *cobra.Command, args []string) { if !initialized { _ = initMirrorCloneExtraArgs(command) } originHelpFunc(command, args) }) return cmd } 
tiup-1.16.3/cmd/root.go000066400000000000000000000171201505422223000146340ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "errors" "fmt" "os" "os/exec" "strings" "syscall" "github.com/fatih/color" "github.com/google/uuid" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/environment" tiupexec "github.com/pingcap/tiup/pkg/exec" "github.com/pingcap/tiup/pkg/localdata" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/version" "github.com/spf13/cobra" ) var ( rootCmd *cobra.Command repoOpts repository.Options eventUUID = uuid.New().String() log = logprinter.NewLogger("") // use default logger ) // arguments var ( binPath string tag string ) func init() { cobra.EnableCommandSorting = false _ = os.Setenv(localdata.EnvNameTelemetryEventUUID, eventUUID) rootCmd = &cobra.Command{ Use: `tiup [flags] [args...] tiup [flags] [args...]`, Long: `TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system. You can run a specific version of a component via "tiup [:version]". If no version number is specified, the latest version installed locally will be used. 
If the specified component does not have any version installed locally, the latest stable version will be downloaded from the repository.`, Example: ` $ tiup playground # Quick start $ tiup playground nightly # Start a playground with the latest nightly version $ tiup install [:version] # Install a component of specific version $ tiup update --all # Update all installed components to the latest version $ tiup update --nightly # Update all installed components to the nightly version $ tiup update --self # Update the "tiup" to the latest version $ tiup list # Fetch the latest supported components list $ tiup status # Display all running/terminated instances $ tiup clean # Clean the data of running/terminated instance (Kill process if it's running) $ tiup clean --all # Clean the data of all running/terminated instances`, SilenceErrors: true, DisableFlagParsing: true, Args: func(cmd *cobra.Command, args []string) error { // Support `tiup ` return nil }, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { switch cmd.Name() { case "init", "rotate", "set": if cmd.HasParent() && cmd.Parent().Name() == "mirror" { // skip environment init break } fallthrough default: e, err := environment.InitEnv(repoOpts, repository.MirrorOptions{}) if err != nil { if errors.Is(perrs.Cause(err), v1manifest.ErrLoadManifest) { log.Warnf("Please check for root manifest file, you may download one from the repository mirror, or try `tiup mirror set` to force reset it.") } return err } environment.SetGlobalEnv(e) } return nil }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return cmd.Help() } env := environment.GlobalEnv() // TBD: change this flag to subcommand // We assume the first unknown parameter is the component name and following // parameters will be transparent passed because registered flags and subcommands // will be parsed correctly. 
// e.g: tiup --tag mytag --rm playground --db 3 --pd 3 --kv 4 // => run "playground" with parameters "--db 3 --pd 3 --kv 4" // tiup --tag mytag --binpath /xxx/tikv-server tikv switch args[0] { case "--help", "-h": return cmd.Help() case "--version", "-v": fmt.Println(version.NewTiUPVersion().String()) return nil case "--binary": if len(args) < 2 { return fmt.Errorf("flag needs an argument: %s", args[0]) } component, ver := environment.ParseCompVersion(args[1]) selectedVer, err := env.SelectInstalledVersion(component, ver) if err != nil { return err } binaryPath, err := env.BinaryPath(component, selectedVer) if err != nil { return err } fmt.Println(binaryPath) return nil case "--binpath": if len(args) < 2 { return fmt.Errorf("flag %s needs an argument", args[0]) } binPath = args[1] args = args[2:] case "--tag", "-T": if len(args) < 2 { return fmt.Errorf("flag %s needs an argument", args[0]) } tag = args[1] args = args[2:] } // component may use tag from environment variable. as workaround, make tiup set the same tag for i := 0; i < len(args)-1; i++ { if args[i] == "--tag" || args[i] == "-T" { tag = args[i+1] } } if len(args) < 1 { return cmd.Help() } componentSpec := args[0] args = args[1:] if len(args) > 0 && args[0] == "--" { args = args[1:] } return tiupexec.RunComponent(env, tag, componentSpec, binPath, args) }, SilenceUsage: true, // implement auto completion for tiup components ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { env := environment.GlobalEnv() if len(args) == 0 { var result []string installed, _ := env.Profile().InstalledComponents() for _, comp := range installed { if strings.HasPrefix(comp, toComplete) { result = append(result, comp) } } return result, cobra.ShellCompDirectiveNoFileComp } component, version := environment.ParseCompVersion(args[0]) selectedVer, err := env.SelectInstalledVersion(component, version) if err != nil { return nil, cobra.ShellCompDirectiveNoFileComp } 
binaryPath, err := env.BinaryPath(component, selectedVer) if err != nil { return nil, cobra.ShellCompDirectiveNoFileComp } argv := []string{binaryPath, "__complete"} argv = append(append(argv, args[1:]...), toComplete) _ = syscall.Exec(binaryPath, argv, os.Environ()) return nil, cobra.ShellCompDirectiveNoFileComp }, } // useless, exist to generate help information rootCmd.Flags().String("binary", "", "Print binary path of a specific version of a component `[:version]`\n"+ "and the latest version installed will be selected if no version specified") rootCmd.Flags().StringP("tag", "T", "", "[Deprecated] Specify a tag for component instance") rootCmd.Flags().String("binpath", "", "Specify the binary path of component instance") rootCmd.Flags().BoolP("version", "v", false, "Print the version of tiup") rootCmd.AddCommand( newInstallCmd(), newListCmd(), newUninstallCmd(), newUpdateCmd(), newStatusCmd(), newCleanCmd(), newMirrorCmd(), newEnvCmd(), newHistoryCmd(), newLinkCmd(), newUnlinkCmd(), ) } // Execute parses the command line arguments and calls proper functions func Execute() { code := 0 err := rootCmd.Execute() if err != nil { // use exit code from component var exitErr *exec.ExitError if errors.As(err, &exitErr) { code = exitErr.ExitCode() } else { fmt.Fprintln(os.Stderr, color.RedString("Error: %v", err)) code = 1 } } color.Unset() if code != 0 { os.Exit(code) } } tiup-1.16.3/cmd/status.go000066400000000000000000000043151505422223000151760ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "os" "path/filepath" "strconv" "strings" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" gops "github.com/shirou/gopsutil/process" "github.com/spf13/cobra" ) func newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "List the status of instantiated components", RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() if len(args) > 0 { return cmd.Help() } return showStatus(env) }, } return cmd } func showStatus(env *environment.Environment) error { var table [][]string table = append(table, []string{"Name", "Component", "PID", "Status", "Created Time", "Directory", "Binary", "Args"}) if dataDir := env.LocalPath(localdata.DataParentDir); utils.IsExist(dataDir) { dirs, err := os.ReadDir(dataDir) if err != nil { return err } for _, dir := range dirs { if !dir.IsDir() { continue } process, err := env.Profile().ReadMetaFile(dir.Name()) if err != nil { return err } if process == nil { // If the path doesn't contain the meta file, which means startup interrupted _ = os.RemoveAll(env.LocalPath(filepath.Join(localdata.DataParentDir, dir.Name()))) continue } status := "TERM" if exist, err := gops.PidExists(int32(process.Pid)); err == nil && exist { status = "RUNNING" } table = append(table, []string{ dir.Name(), process.Component, strconv.Itoa(process.Pid), status, process.CreatedTime, process.Dir, process.Exec, strings.Join(process.Args, " "), }) } } tui.PrintTable(table, true) return nil } 
tiup-1.16.3/cmd/testdata/000077500000000000000000000000001505422223000151325ustar00rootroot00000000000000tiup-1.16.3/cmd/testdata/test-v1.1.1-darwin-amd64.sha1000066400000000000000000000000511505422223000217000ustar00rootroot00000000000000954b5bb35b07d377b92532b57649dd32aeffcbb2 tiup-1.16.3/cmd/testdata/test-v1.1.1-darwin-amd64.tar.gz000066400000000000000000000002671505422223000222620ustar00rootroot00000000000000&Rg^ҽ0afHB :1Y=t'+SJydSD)(mw!ݰʹ M_zB\[?3CV,Rv#&McNA*~{9np(tiup-1.16.3/cmd/testdata/test-v1.1.1-linux-amd64.sha1000066400000000000000000000000511505422223000215530ustar00rootroot0000000000000031573169ad32ba90a89990bd73f334934ba3becf tiup-1.16.3/cmd/testdata/test-v1.1.1-linux-amd64.tar.gz000066400000000000000000000002671505422223000221350ustar00rootroot00000000000000-Rg^ҽ0afHB :1Y=t'+SJydSD)(mw!ݰʹ M_zB\[?3CV,Rv#&McNA*~{9np(tiup-1.16.3/cmd/testdata/tiup-component-test.index000066400000000000000000000012351505422223000221220ustar00rootroot00000000000000{ "description": "test", "modified": "2020-02-26T15:20:35+08:00", "nightly": { "version": "nightly", "entry": "bin/test", "date": "2020-02-27T10:10:10+08:00", "platforms": ["darwin/amd64", "linux/x84_64"] }, "versions": [ { "version": "v1.0.0", "entry": "bin/test", "date": "2020-02-27T10:10:10+08:00", "platforms": ["darwin/amd64", "linux/x84_64"] }, { "version": "v1.1.1", "entry": "bin/test", "date": "2020-02-27T10:10:10+08:00", "platforms": ["darwin/amd64", "linux/x84_64"] } ] }tiup-1.16.3/cmd/testdata/tiup-manifest.index000066400000000000000000000003721505422223000207520ustar00rootroot00000000000000{ "description": "TiDB components list", "modified": "2020-02-26T15:20:35+08:00", "components": [ { "name": "test", "desc": "desc", "platforms": ["darwin/amd64", "linux/amd64"] } ] }tiup-1.16.3/cmd/uninstall.go000066400000000000000000000105511505422223000156630ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "fmt" "os" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newUninstallCmd() *cobra.Command { var all, self bool cmdUnInst := &cobra.Command{ Use: "uninstall :", Short: "Uninstall components or versions of a component", Long: `If you specify a version number, uninstall the specified version of the component. You must use --all explicitly if you want to remove all components or versions which are installed. You can uninstall multiple components or multiple versions of a component at once. The --self flag which is used to uninstall tiup. # Uninstall tiup tiup uninstall --self # Uninstall the specific version a component tiup uninstall tidb:v3.0.10 # Uninstall all version of specific component tiup uninstall tidb --all # Uninstall all installed components tiup uninstall --all`, RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() if self { deletable := []string{"storage/cluster/packages", "components", "manifests", "manifest", "bin"} for _, dir := range deletable { if err := os.RemoveAll(env.Profile().Path(dir)); err != nil { return errors.Trace(err) } fmt.Printf("Remove directory '%s' successfully!\n", env.Profile().Path(dir)) } fmt.Printf("Uninstalled TiUP successfully! 
(User data reserved, you can delete '%s' manually if you confirm userdata useless)\n", env.Profile().Root()) return nil } switch { case len(args) > 0: return removeComponents(env, args, all) case len(args) == 0 && all: if err := os.RemoveAll(env.LocalPath(localdata.ComponentParentDir)); err != nil { return errors.Trace(err) } fmt.Println("Uninstalled all components successfully!") return nil default: return cmd.Help() } }, } cmdUnInst.Flags().BoolVar(&all, "all", false, "Remove all components or versions.") cmdUnInst.Flags().BoolVar(&self, "self", false, "Uninstall tiup and clean all local data") return cmdUnInst } func removeComponents(env *environment.Environment, specs []string, all bool) error { for _, spec := range specs { paths := []string{} if strings.Contains(spec, ":") { parts := strings.SplitN(spec, ":", 2) // after this version is deleted, component will have no version left. delete the whole component dir directly if !utils.IsExist(env.LocalPath(localdata.ComponentParentDir, parts[0])) { return errors.Trace(fmt.Errorf("component `%s` is not installed, please use `tiup list %s` to check", parts[0], parts[0])) } dir, err := os.ReadDir(env.LocalPath(localdata.ComponentParentDir, parts[0])) if err != nil { return errors.Trace(err) } if parts[1] == utils.NightlyVersionAlias { for _, fi := range dir { if utils.Version(fi.Name()).IsNightly() { paths = append(paths, env.LocalPath(localdata.ComponentParentDir, parts[0], fi.Name())) } } } else { paths = append(paths, env.LocalPath(localdata.ComponentParentDir, parts[0], parts[1])) } // if no more version left, delete the whole component dir if len(dir)-len(paths) < 1 { paths = append(paths, env.LocalPath(localdata.ComponentParentDir, parts[0])) } } else { if !all { fmt.Printf("Use `tiup uninstall %s --all` if you want to remove all versions.\n", spec) continue } paths = append(paths, env.LocalPath(localdata.ComponentParentDir, spec)) } for _, path := range paths { if !utils.IsExist(path) { return 
errors.Trace(fmt.Errorf("component `%s` is not installed, please check", spec)) } fmt.Printf("Removing directory %s\n", path) if err := os.RemoveAll(path); err != nil { return errors.Trace(err) } } fmt.Printf("Uninstalled component `%s` successfully!\n", spec) } return nil } tiup-1.16.3/cmd/unlink.go000066400000000000000000000030031505422223000151440ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "os" "path/filepath" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/tui" "github.com/spf13/cobra" ) func newUnlinkCmd() *cobra.Command { cmd := &cobra.Command{ Use: "unlink ", Short: "Unlink component binary to $TIUP_HOME/bin/", Long: `[experimental] Unlink component binary in $TIUP_HOME/bin/`, RunE: func(cmd *cobra.Command, args []string) error { env := environment.GlobalEnv() if len(args) != 1 { return cmd.Help() } component, version := environment.ParseCompVersion(args[0]) version, err := env.SelectInstalledVersion(component, version) if err != nil { return err } binPath, err := env.BinaryPath(component, version) if err != nil { return err } target := env.LocalPath("bin", filepath.Base(binPath)) if err := tui.PromptForConfirmOrAbortError("%s will be removed.\n Do you want to continue? [y/N]:", target); err != nil { return err } return os.Remove(target) }, } return cmd } tiup-1.16.3/cmd/update.go000066400000000000000000000103221505422223000151300ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "fmt" "os" "path/filepath" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newUpdateCmd() *cobra.Command { var all, nightly, force, self bool cmd := &cobra.Command{ Use: "update [component1][:version] [component2..N]", Short: "Update tiup components to the latest version", Long: `Update some components to the latest version. Use --nightly to update to the latest nightly version. Use --all to update all components installed locally. Use : to update to the specified version. Components will be ignored if the latest version has already been installed locally, but you can use --force explicitly to overwrite an existing installation. Use --self which is used to update TiUP to the latest version. All other flags will be ignored if the flag --self is given. 
$ tiup update --all # Update all components to the latest stable version $ tiup update --nightly --all # Update all components to the latest nightly version $ tiup update playground:v0.0.3 --force # Overwrite an existing local installation $ tiup update --self # Update TiUP to the latest version`, RunE: func(cmd *cobra.Command, components []string) error { if (len(components) == 0 && !all && !force && !self) || (len(components) > 0 && all) { return cmd.Help() } env := environment.GlobalEnv() if self { if err := checkTiUPBinary(env); err != nil { return err } originFile := env.LocalPath("bin", "tiup") renameFile := env.LocalPath("bin", "tiup.tmp") if err := os.Rename(originFile, renameFile); err != nil { fmt.Printf("Backup of `%s` to `%s` failed.\n", originFile, renameFile) return err } var err error defer func() { if err != nil || utils.IsNotExist(originFile) { if err := os.Rename(renameFile, originFile); err != nil { fmt.Printf("Please rename `%s` to `%s` manually.\n", renameFile, originFile) } } else { if err := os.Remove(renameFile); err != nil { fmt.Printf("Please delete `%s` manually.\n", renameFile) } } }() err = env.SelfUpdate() if err != nil { return err } } if force || all || len(components) > 0 { err := updateComponents(env, components, nightly, force) if err != nil { return err } } fmt.Println("Updated successfully!") return nil }, } cmd.Flags().BoolVar(&all, "all", false, "Update all components") cmd.Flags().BoolVar(&nightly, "nightly", false, "Update the components to nightly version") cmd.Flags().BoolVar(&force, "force", false, "Force update a component to the latest version") cmd.Flags().BoolVar(&self, "self", false, "Update tiup to the latest version") return cmd } func updateComponents(env *environment.Environment, components []string, nightly, force bool) error { if len(components) == 0 { installed, err := env.Profile().InstalledComponents() if err != nil { return err } components = installed } return env.UpdateComponents(components, nightly, 
force) } // checkTiUPBinary check if TiUP exists in TiUP_HOME func checkTiUPBinary(env *environment.Environment) error { tiUPHomePath, _ := filepath.Abs(env.LocalPath("bin", "tiup")) realTiUPPath, err := os.Executable() if err != nil { // Ignore the problem that the execution directory cannot be obtained return nil } realTiUPPath, _ = filepath.Abs(realTiUPPath) if utils.IsNotExist(tiUPHomePath) || tiUPHomePath != realTiUPPath { fmt.Printf("Tiup install directory is: %s\n", filepath.Dir(realTiUPPath)) return fmt.Errorf("If you used some external package manager to install TiUP (e.g., brew), try upgrade with that") } return nil } tiup-1.16.3/code_review_guide.md000066400000000000000000000040231505422223000165470ustar00rootroot00000000000000# Code Review Guide ## Things to do before you start reviewing the PR * Make sure you are familiar with the packages the PR modifies. * Make sure you have enough continuous time to review the PR, use 300 LOC per hour to estimate. * Make sure you can follow the updates of the PR in the next few work days. * Read the description of the PR, if it's not easy to understand, ask the coder to improve it. * For a bug fix PR, if there is no test case, ask the coder to add tests. * For a performance PR, if no benchmark result is provided, ask the coder to add a benchmark result. ## Things to check during the review process * Am I able to understand the purpose of each unit test? * Do unit tests actually test that the code is performing the intended functionality? * Do unit tests cover all the important code blocks and specially handled errors? * Could procedure tests be rewritten to table driven tests? * Is the code written following the style guide? * Is the same code duplicated more than twice? * Do comments exist and describe the intent of the code? * Are hacks, workarounds and temporary fixes commented? * Does this function do more than the name suggests? * Can this function's behavior be inferred by its name? 
* Do tests exist and are they comprehensive? * Do unit tests cover all the important code branches? * Could the test code be extracted into a table-driven test? ## Things to keep in mind when you are writing a review comment * Be kind to the coder, not to the code. * Ask questions rather than make statements. * Treat people who know less than you with respect, deference, and patience. * Remember to praise when the code quality exceeds your expectation. * It isn't necessarily wrong if the coder's solution is different than yours. * Refer to the code style document when necessary. ## Things to remember after you submitted the review comment * Checkout Github notification regularly to keep track of the updates of the PR. * When the PR has been updated, start another round of review or give it a LGTM. tiup-1.16.3/components/000077500000000000000000000000001505422223000147435ustar00rootroot00000000000000tiup-1.16.3/components/bench/000077500000000000000000000000001505422223000160225ustar00rootroot00000000000000tiup-1.16.3/components/bench/README.md000066400000000000000000000001271505422223000173010ustar00rootroot00000000000000# TiUP bench tiup bench has been **moved** to https://github.com/PingCAP-QE/tiup-benchtiup-1.16.3/components/client/000077500000000000000000000000001505422223000162215ustar00rootroot00000000000000tiup-1.16.3/components/client/Makefile000066400000000000000000000064071505422223000176700ustar00rootroot00000000000000.DEFAULT_GOAL := default LANG=C MAKEOVERRIDES = targets: @printf "%-30s %s\n" "Target" "Description" @printf "%-30s %s\n" "------" "-----------" @make -pqR : 2>/dev/null \ | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' \ | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' \ | sort \ | xargs -I _ sh -c 'printf "%-30s " _; make _ -nB | (grep "^# Target:" || echo "") | tail -1 | sed "s/^# Target: //g"' REPO := github.com/pingcap/tiup GOOS := $(if $(GOOS),$(GOOS),$(shell go env GOOS)) GOARCH := $(if 
$(GOARCH),$(GOARCH),$(shell go env GOARCH)) GOENV := GO111MODULE=on CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) GO := $(GOENV) go GOBUILD := $(GO) build $(BUILD_FLAGS) GOTEST := GO111MODULE=on CGO_ENABLED=1 go test -p 3 SHELL := /usr/bin/env bash _COMMIT := $(shell git describe --no-match --always --dirty) _GITREF := $(shell git rev-parse --abbrev-ref HEAD) COMMIT := $(if $(COMMIT),$(COMMIT),$(_COMMIT)) GITREF := $(if $(GITREF),$(GITREF),$(_GITREF)) LDFLAGS := -w -s LDFLAGS += -X "$(REPO)/pkg/version.GitHash=$(COMMIT)" LDFLAGS += -X "$(REPO)/pkg/version.GitRef=$(GITREF)" LDFLAGS += $(EXTRA_LDFLAGS) FILES := $$(find . -name "*.go") FAILPOINT_ENABLE := $$(go tool github.com/pingcap/failpoint/failpoint-ctl enable) FAILPOINT_DISABLE := $$(go tool github.com/pingcap/failpoint/failpoint-ctl disable) default: check build @# Target: run the checks and then build. include ../../tests/Makefile # Build components build: components @# Target: build tiup and all it's components components: client @# Target: build the playground, client, cluster, dm and server components client: @# Target: build the tiup-client component $(GOBUILD) -ldflags '$(LDFLAGS)' -o ../../bin/tiup-client . check: fmt lint check-static vet @# Target: run all checkers. (fmt, lint, check-static and vet) check-static: @# Target: run the golangci-lint static check tool ../../tools/bin/golangci-lint run --config ../../tools/check/golangci.yaml ./... --timeout=3m --fix lint: @# Target: run the lint checker revive @echo "linting" @go tool github.com/mgechev/revive -formatter friendly -config ../../tools/check/revive.toml $(FILES) vet: @# Target: run the go vet tool $(GO) vet ./... 
clean: @# Target: run the build cleanup steps @rm -rf bin @rm -rf tests/*/{bin/*.test,logs} test: failpoint-enable run-tests failpoint-disable @# Target: run tests with failpoint enabled # TODO: refactor integration tests base on v1 manifest # run-tests: unit-test integration_test run-tests: unit-test @# Target: run the unit tests # Run tests unit-test: @# Target: run the code coverage test phase mkdir -p ../../cover TIUP_HOME=$(shell pwd)/../../tests/tiup $(GOTEST) ./... -covermode=count -coverprofile ../../cover/cov.unit-test.client.out race: failpoint-enable @# Target: run race check with failpoint enabled TIUP_HOME=$(shell pwd)/../../tests/tiup $(GOTEST) -race ./... || { $(FAILPOINT_DISABLE); exit 1; } @$(FAILPOINT_DISABLE) failpoint-enable: @# Target: enable failpoint @$(FAILPOINT_ENABLE) failpoint-disable: @# Target: disable failpoint @$(FAILPOINT_DISABLE) fmt: @# Target: run the go formatter utility @echo "gofmt (simplify)" @gofmt -s -l -w $(FILES) 2>&1 @echo "goimports (if installed)" $(shell goimports -w $(FILES) 2>/dev/null) tiup-1.16.3/components/client/endpoint.go000066400000000000000000000011161505422223000203670ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main type endpoint struct { component string dsn string } tiup-1.16.3/components/client/main.go000066400000000000000000000114671505422223000175050ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "bufio" "context" "fmt" "io" "log" "os" "os/user" "path" ui "github.com/gizak/termui/v3" "github.com/gizak/termui/v3/widgets" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/repository" gops "github.com/shirou/gopsutil/process" "github.com/spf13/cobra" _ "github.com/xo/usql/drivers/mysql" "github.com/xo/usql/env" "github.com/xo/usql/handler" "github.com/xo/usql/rline" ) func main() { if err := execute(); err != nil { os.Exit(1) } } func execute() error { rootCmd := &cobra.Command{ Use: "tiup client", Short: "Connect a TiDB cluster in your local host", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { target := "" if len(args) > 0 { target = args[0] } env, err := environment.InitEnv(repository.Options{}, repository.MirrorOptions{}) if err != nil { return err } environment.SetGlobalEnv(env) return connect(target) }, } return rootCmd.Execute() } func connect(target string) error { tiupHome := os.Getenv(localdata.EnvNameHome) if tiupHome == "" { return fmt.Errorf("env variable %s not set, are you running client out of tiup?", localdata.EnvNameHome) } endpoints, err := scanEndpoint(tiupHome) if err != nil { return fmt.Errorf("error on read files: %s", err.Error()) } if len(endpoints) == 0 { return fmt.Errorf("It seems no playground is running, execute `tiup playground` to start one") } var ep *endpoint if target == "" { if ep = selectEndpoint(endpoints); ep == nil { os.Exit(0) } } else { for _, 
end := range endpoints { if end.component == target { ep = end } } if ep == nil { return fmt.Errorf("specified instance %s not found, maybe it's not alive now, execute `tiup status` to see instance list", target) } } u, err := user.Current() if err != nil { return fmt.Errorf("can't get current user: %s", err.Error()) } l, err := rline.New(false, "", env.HistoryFile(u)) if err != nil { return fmt.Errorf("can't open history file: %s", err.Error()) } h := handler.New(l, u, os.Getenv(localdata.EnvNameInstanceDataDir), true) if err = h.Open(context.TODO(), ep.dsn); err != nil { return fmt.Errorf("can't open connection to %s: %s", ep.dsn, err.Error()) } if err = h.Run(); err != io.EOF { return err } return nil } func scanEndpoint(tiupHome string) ([]*endpoint, error) { endpoints := []*endpoint{} files, err := os.ReadDir(path.Join(tiupHome, localdata.DataParentDir)) if err != nil { return nil, err } for _, file := range files { if !isInstanceAlive(tiupHome, file.Name()) { continue } endpoints = append(endpoints, readDsn(path.Join(tiupHome, localdata.DataParentDir, file.Name()), file.Name())...) 
} return endpoints, nil } func isInstanceAlive(tiupHome, instance string) bool { s, err := environment.GlobalEnv().Profile().ReadMetaFile(instance) if err != nil { return false } exist, _ := gops.PidExists(int32(s.Pid)) return exist } func readDsn(dir, component string) []*endpoint { endpoints := []*endpoint{} file, err := os.Open(path.Join(dir, "dsn")) if err != nil { return endpoints } defer file.Close() scanner := bufio.NewScanner(file) for scanner.Scan() { endpoints = append(endpoints, &endpoint{ component: component, dsn: scanner.Text(), }) } return endpoints } func selectEndpoint(endpoints []*endpoint) *endpoint { if err := ui.Init(); err != nil { log.Fatalf("failed to initialize termui: %v", err) } defer ui.Close() l := widgets.NewList() l.Title = "Choose an endpoint to connect" ml := 0 for _, ep := range endpoints { if ml < len(ep.component) { ml = len(ep.component) } } fmtStr := fmt.Sprintf(" %%-%ds %%s", ml) for _, ep := range endpoints { l.Rows = append(l.Rows, fmt.Sprintf(fmtStr, ep.component, ep.dsn)) } l.TextStyle = ui.NewStyle(ui.ColorWhite) l.SelectedRowStyle = ui.NewStyle(ui.ColorGreen) l.WrapText = false size := min(len(endpoints), 16) l.SetRect(0, 0, 80, size+2) ui.Render(l) uiEvents := ui.PollEvents() for { e := <-uiEvents _ = os.WriteFile("/tmp/log", []byte(e.ID+"\n"), 0664) switch e.ID { case "q", "": return nil case "j", "": l.ScrollDown() case "k", "": l.ScrollUp() case "": return endpoints[l.SelectedRow] } ui.Render(l) } } tiup-1.16.3/components/cluster/000077500000000000000000000000001505422223000164245ustar00rootroot00000000000000tiup-1.16.3/components/cluster/command/000077500000000000000000000000001505422223000200425ustar00rootroot00000000000000tiup-1.16.3/components/cluster/command/audit.go000066400000000000000000000033271505422223000215040ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/audit" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/spf13/cobra" ) // retainDay number of days to keep audit logs for deletion var retainDays int func newAuditCmd() *cobra.Command { cmd := &cobra.Command{ Use: "audit [audit-id]", Short: "Show audit log of cluster operation", RunE: func(cmd *cobra.Command, args []string) error { switch len(args) { case 0: return audit.ShowAuditList(spec.AuditDir()) case 1: return audit.ShowAuditLog(spec.AuditDir(), args[0]) default: return cmd.Help() } }, } cmd.AddCommand(newAuditCleanupCmd()) return cmd } func newAuditCleanupCmd() *cobra.Command { cmd := &cobra.Command{ Use: "cleanup", Short: "cleanup cluster audit logs", RunE: func(cmd *cobra.Command, args []string) error { if retainDays < 0 { return errors.Errorf("retain-days cannot be less than 0") } err := audit.DeleteAuditLog(spec.AuditDir(), retainDays, skipConfirm, gOpt.DisplayMode) if err != nil { return err } return nil }, } cmd.Flags().IntVar(&retainDays, "retain-days", 60, "Number of days to keep audit logs for deletion") return cmd } tiup-1.16.3/components/cluster/command/check.go000066400000000000000000000062501505422223000214510ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "path" "github.com/pingcap/tiup/pkg/cluster/manager" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newCheckCmd() *cobra.Command { opt := manager.CheckOptions{ Opr: &operator.CheckOptions{}, IdentityFile: path.Join(utils.UserHome(), ".ssh", "id_rsa"), } cmd := &cobra.Command{ Use: "check [scale-out.yml]", Short: "Perform preflight checks for the cluster.", Long: `Perform preflight checks for the cluster. By default, it checks deploy servers before a cluster is deployed, the input is the topology.yaml for the cluster. If '--cluster' is set, it will perform checks for an existing cluster, the input is the cluster name. Some checks are ignore in this mode, such as port and dir conflict checks with other clusters If you want to check the scale-out topology, please use execute the following command ' check --cluster ' it will check the new instances `, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 && len(args) != 2 { return cmd.Help() } scaleOutTopo := "" if len(args) == 2 { if !opt.ExistCluster { return cmd.Help() } scaleOutTopo = args[1] } return cm.CheckCluster(args[0], scaleOutTopo, opt, gOpt) }, } cmd.Flags().StringVarP(&opt.User, "user", "u", utils.CurrentUser(), "The user name to login via SSH. The user must has root (or sudo) privilege.") cmd.Flags().StringVarP(&opt.IdentityFile, "identity_file", "i", opt.IdentityFile, "The path of the SSH identity file. If specified, public key authentication will be used.") cmd.Flags().BoolVarP(&opt.UsePassword, "password", "p", false, "Use password of target hosts. 
If specified, password authentication will be used.") cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only check specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only check specified nodes") cmd.Flags().BoolVar(&opt.Opr.EnableCPU, "enable-cpu", false, "Enable CPU thread count check") cmd.Flags().BoolVar(&opt.Opr.EnableMem, "enable-mem", false, "Enable memory size check") cmd.Flags().BoolVar(&opt.Opr.EnableDisk, "enable-disk", false, "Enable disk IO (fio) check") cmd.Flags().BoolVar(&opt.ApplyFix, "apply", false, "Try to fix failed checks") cmd.Flags().BoolVar(&opt.ExistCluster, "cluster", false, "Check existing cluster, the input is a cluster name.") cmd.Flags().Uint64Var(&gOpt.APITimeout, "api-timeout", 10, "Timeout in seconds when querying PD APIs.") cmd.Flags().StringVarP(&opt.TempDir, "tempdir", "t", "/tmp/tiup", "The temporary directory.") return cmd } tiup-1.16.3/components/cluster/command/clean.go000066400000000000000000000053511505422223000214570ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/spf13/cobra" ) func newCleanCmd() *cobra.Command { cleanOpt := operator.Options{} cleanALl := false cmd := &cobra.Command{ Use: "clean ", Short: "(EXPERIMENTAL) Cleanup a specified cluster", Long: `EXPERIMENTAL: This is an experimental feature, things may or may not work, please backup your data before process. 
Cleanup a specified cluster without destroying it. You can retain some nodes and roles data when cleanup the cluster, eg: $ tiup cluster clean --all $ tiup cluster clean --log $ tiup cluster clean --data $ tiup cluster clean --audit-log $ tiup cluster clean --all --ignore-role prometheus $ tiup cluster clean --all --ignore-node 172.16.13.11:9000 $ tiup cluster clean --all --ignore-node 172.16.13.12`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] if cleanALl { cleanOpt.CleanupData = true cleanOpt.CleanupLog = true } if !(cleanOpt.CleanupData || cleanOpt.CleanupLog || cleanOpt.CleanupAuditLog) { return cmd.Help() } return cm.CleanCluster(clusterName, gOpt, cleanOpt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringArrayVar(&cleanOpt.RetainDataNodes, "ignore-node", nil, "Specify the nodes or hosts whose data will be retained") cmd.Flags().StringArrayVar(&cleanOpt.RetainDataRoles, "ignore-role", nil, "Specify the roles whose data will be retained") cmd.Flags().BoolVar(&cleanOpt.CleanupData, "data", false, "Cleanup data") cmd.Flags().BoolVar(&cleanOpt.CleanupLog, "log", false, "Cleanup log") cmd.Flags().BoolVar(&cleanOpt.CleanupAuditLog, "audit-log", false, "Cleanup TiDB-server audit log") cmd.Flags().BoolVar(&cleanALl, "all", false, "Cleanup both log and data (not include audit log)") return cmd } tiup-1.16.3/components/cluster/command/deploy.go000066400000000000000000000064661505422223000217010ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "context" "path" "github.com/pingcap/tiup/pkg/cluster/manager" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) var ( errNSDeploy = errNS.NewSubNamespace("deploy") errDeployNameDuplicate = errNSDeploy.NewType("name_dup", utils.ErrTraitPreCheck) ) func newDeploy() *cobra.Command { opt := manager.DeployOptions{ IdentityFile: path.Join(utils.UserHome(), ".ssh", "id_rsa"), } cmd := &cobra.Command{ Use: "deploy ", Short: "Deploy a cluster for production", Long: "Deploy a cluster for production. SSH connection will be used to deploy files, as well as creating system users for running the service.", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { shouldContinue, err := tui.CheckCommandArgsAndMayPrintHelp(cmd, args, 3) if err != nil { return err } if !shouldContinue { return nil } clusterName := args[0] version, err := utils.FmtVer(args[1]) if err != nil { return err } topoFile := args[2] return cm.Deploy(clusterName, version, topoFile, opt, postDeployHook, skipConfirm, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 2: return nil, cobra.ShellCompDirectiveDefault default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVarP(&opt.User, "user", "u", utils.CurrentUser(), "The user name to login via SSH. 
The user must has root (or sudo) privilege.") cmd.Flags().BoolVarP(&opt.SkipCreateUser, "skip-create-user", "", false, "(EXPERIMENTAL) Skip creating the user specified in topology.") cmd.Flags().StringVarP(&opt.IdentityFile, "identity_file", "i", opt.IdentityFile, "The path of the SSH identity file. If specified, public key authentication will be used.") cmd.Flags().BoolVarP(&opt.UsePassword, "password", "p", false, "Use password of target hosts. If specified, password authentication will be used.") cmd.Flags().BoolVarP(&gOpt.IgnoreConfigCheck, "ignore-config-check", "", false, "Ignore the config check result of components") cmd.Flags().BoolVarP(&opt.NoLabels, "no-labels", "", false, "Don't check TiKV labels") return cmd } func postDeployHook(builder *task.Builder, topo spec.Topology, gOpt operator.Options) { enableTask := task.NewBuilder(builder.Logger).Func("Setting service auto start on boot", func(ctx context.Context) error { return operator.Enable(ctx, topo, operator.Options{}, true) }).BuildAsStep("Enable service").SetHidden(true) builder.Parallel(false, enableTask) } tiup-1.16.3/components/cluster/command/destroy.go000066400000000000000000000047371505422223000220750ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( perrs "github.com/pingcap/errors" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/set" "github.com/spf13/cobra" ) func newDestroyCmd() *cobra.Command { destroyOpt := operator.Options{} cmd := &cobra.Command{ Use: "destroy ", Short: "Destroy a specified cluster", Long: `Destroy a specified cluster, which will clean the deployment binaries and data. You can retain some nodes and roles data when destroy cluster, eg: $ tiup cluster destroy --retain-role-data prometheus $ tiup cluster destroy --retain-node-data 172.16.13.11:9000 $ tiup cluster destroy --retain-node-data 172.16.13.12`, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] // Validate the retained roles to prevent unexpected deleting data if len(destroyOpt.RetainDataRoles) > 0 { validRoles := set.NewStringSet(spec.AllComponentNames()...) for _, role := range destroyOpt.RetainDataRoles { if !validRoles.Exist(role) { return perrs.Errorf("role name `%s` invalid", role) } } } return cm.DestroyCluster(clusterName, gOpt, destroyOpt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringArrayVar(&destroyOpt.RetainDataNodes, "retain-node-data", nil, "Specify the nodes or hosts whose data will be retained") cmd.Flags().StringArrayVar(&destroyOpt.RetainDataRoles, "retain-role-data", nil, "Specify the roles whose data will be retained") cmd.Flags().BoolVar(&destroyOpt.Force, "force", false, "Force will ignore remote error while destroy the cluster") return cmd } tiup-1.16.3/components/cluster/command/disable.go000066400000000000000000000027011505422223000217740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newDisableCmd() *cobra.Command { cmd := &cobra.Command{ Use: "disable ", Short: "Disable automatic enabling of TiDB clusters at boot", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return nil } clusterName := args[0] return cm.EnableCluster(clusterName, gOpt, false) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only disable specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only disable specified nodes") return cmd } tiup-1.16.3/components/cluster/command/display.go000066400000000000000000000073221505422223000220420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "errors" "fmt" "strings" "time" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/spf13/cobra" ) func newDisplayCmd() *cobra.Command { var ( showDashboardOnly bool showVersionOnly bool showTiKVLabels bool statusTimeout uint64 dopt manager.DisplayOption ) cmd := &cobra.Command{ Use: "display ", Short: "Display information of a TiDB cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } gOpt.APITimeout = statusTimeout dopt.ClusterName = args[0] exist, err := tidbSpec.Exist(dopt.ClusterName) if err != nil { return err } if !exist { return perrs.Errorf("Cluster %s not found", dopt.ClusterName) } metadata, err := spec.ClusterMetadata(dopt.ClusterName) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return err } if showVersionOnly { fmt.Println(metadata.Version) return nil } if showDashboardOnly { tlsCfg, err := metadata.Topology.TLSConfig(tidbSpec.Path(dopt.ClusterName, spec.TLSCertKeyDir)) if err != nil { return err } return cm.DisplayDashboardInfo(dopt.ClusterName, time.Second*time.Duration(gOpt.APITimeout), tlsCfg) } if showTiKVLabels { return cm.DisplayTiKVLabels(dopt, gOpt) } return cm.Display(dopt, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only display specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only display specified nodes") cmd.Flags().BoolVar(&dopt.ShowUptime, "uptime", false, "Display with uptime") cmd.Flags().BoolVar(&showDashboardOnly, "dashboard", false, "Only display TiDB Dashboard 
information") cmd.Flags().BoolVar(&showVersionOnly, "version", false, "Only display TiDB cluster version") cmd.Flags().BoolVar(&showTiKVLabels, "labels", false, "Only display labels of specified TiKV role or nodes") cmd.Flags().BoolVar(&dopt.ShowProcess, "process", false, "display cpu and memory usage of nodes") cmd.Flags().BoolVar(&dopt.ShowManageHost, "manage-host", false, "display manage host of nodes") cmd.Flags().BoolVar(&dopt.ShowNuma, "numa", false, "display numa information of nodes") cmd.Flags().BoolVar(&dopt.ShowVersions, "versions", false, "display component version of instances") cmd.Flags().Uint64Var(&statusTimeout, "status-timeout", 10, "Timeout in seconds when getting node status") return cmd } func shellCompGetClusterName(cm *manager.Manager, toComplete string) ([]string, cobra.ShellCompDirective) { var result []string clusters, _ := cm.GetClusterList() for _, c := range clusters { if strings.HasPrefix(c.Name, toComplete) { result = append(result, c.Name) } } return result, cobra.ShellCompDirectiveNoFileComp } tiup-1.16.3/components/cluster/command/edit_config.go000066400000000000000000000030201505422223000226360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/spf13/cobra" ) func newEditConfigCmd() *cobra.Command { opt := manager.EditConfigOptions{} cmd := &cobra.Command{ Use: "edit-config ", Short: "Edit TiDB cluster config", Long: "Edit TiDB cluster config. 
Will use editor from environment variable `EDITOR`, default use vi", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.EditConfig(clusterName, opt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVarP(&opt.NewTopoFile, "topology-file", "", opt.NewTopoFile, "Use provided topology file to substitute the original one instead of editing it.") return cmd } tiup-1.16.3/components/cluster/command/enable.go000066400000000000000000000026641505422223000216270ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/spf13/cobra" ) func newEnableCmd() *cobra.Command { cmd := &cobra.Command{ Use: "enable ", Short: "Enable a TiDB cluster automatically at boot", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return nil } clusterName := args[0] return cm.EnableCluster(clusterName, gOpt, true) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only enable specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only enable specified nodes") return cmd } tiup-1.16.3/components/cluster/command/exec.go000066400000000000000000000032061505422223000213160ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/spf13/cobra" ) func newExecCmd() *cobra.Command { opt := manager.ExecOptions{} cmd := &cobra.Command{ Use: "exec ", Short: "Run shell command on host in the tidb cluster", Hidden: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.Exec(clusterName, opt, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVar(&opt.Command, "command", "ls", "the command run on cluster host") cmd.Flags().BoolVar(&opt.Sudo, "sudo", false, "use root permissions (default false)") cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only exec on host with specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only exec on host with specified nodes") return cmd } tiup-1.16.3/components/cluster/command/import.go000066400000000000000000000132011505422223000217000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "context" "fmt" "os" "path/filepath" "github.com/fatih/color" "github.com/pingcap/tiup/pkg/cluster/ansible" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newImportCmd() *cobra.Command { var ( ansibleDir string inventoryFileName string ansibleCfgFile string rename string noBackup bool ) cmd := &cobra.Command{ Use: "import", Short: "Import an exist TiDB cluster from TiDB-Ansible", RunE: func(cmd *cobra.Command, args []string) error { // Use current directory as ansibleDir by default if ansibleDir == "" { cwd, err := os.Getwd() if err != nil { return err } ansibleDir = cwd } ctx := ctxt.New( context.Background(), gOpt.Concurrency, log, ) // migrate cluster metadata from Ansible inventory clsName, clsMeta, inv, err := ansible.ReadInventory(ctx, ansibleDir, inventoryFileName) if err != nil { return err } // Rename the imported cluster if rename != "" { clsName = rename } if clsName == "" { return fmt.Errorf("cluster name should not be empty") } exist, err := tidbSpec.Exist(clsName) if err != nil { return err } if exist { return errDeployNameDuplicate. New("Cluster name '%s' is duplicated", clsName). 
WithProperty(tui.SuggestionFromFormat( "%s", fmt.Sprintf("Please use --rename `NAME` to specify another name (You can use `%s list` to see all clusters)", tui.OsArgs0()))) } // prompt for backups backupDir := spec.ClusterPath(clsName, "ansible-backup") backupFile := filepath.Join(ansibleDir, fmt.Sprintf("tiup-%s.bak", inventoryFileName)) prompt := fmt.Sprintf("The ansible directory will be moved to %s after import.", backupDir) if noBackup { log.Infof("The '--no-backup' flag is set, the ansible directory will be kept at its current location.") prompt = fmt.Sprintf("The inventory file will be renamed to %s after import.", backupFile) } log.Warnf("TiDB-Ansible and TiUP Cluster can NOT be used together, please DO NOT try to use ansible to manage the imported cluster anymore to avoid metadata conflict.") log.Infof("%s", prompt) if !skipConfirm { err = tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]: ") if err != nil { return err } } if !skipConfirm { err = tui.PromptForConfirmOrAbortError( "Prepared to import TiDB %s cluster %s.\nDo you want to continue? 
[y/N]:", clsMeta.Version, clsName) if err != nil { return err } } // parse config and import nodes if err = ansible.ParseAndImportInventory( ctx, ansibleDir, ansibleCfgFile, clsMeta, inv, gOpt.SSHTimeout, gOpt.SSHType, ); err != nil { return err } // copy SSH key to TiUP profile directory if err = utils.MkdirAll(spec.ClusterPath(clsName, "ssh"), 0755); err != nil { return err } srcKeyPathPriv := ansible.SSHKeyPath() srcKeyPathPub := srcKeyPathPriv + ".pub" dstKeyPathPriv := spec.ClusterPath(clsName, "ssh", "id_rsa") dstKeyPathPub := dstKeyPathPriv + ".pub" if err = utils.Copy(srcKeyPathPriv, dstKeyPathPriv); err != nil { return err } if err = utils.Copy(srcKeyPathPub, dstKeyPathPub); err != nil { return err } // copy config files form deployment servers if err = ansible.ImportConfig(ctx, clsName, clsMeta, gOpt); err != nil { return err } // copy config detail to meta file if err = ansible.LoadConfig(clsName, clsMeta); err != nil { return err } if err = spec.SaveClusterMeta(clsName, clsMeta); err != nil { return err } // comment config to avoid duplicated copy if err = ansible.CommentConfig(clsName); err != nil { return err } // backup ansible files if noBackup { // rename original TiDB-Ansible inventory file if err = utils.Move(filepath.Join(ansibleDir, inventoryFileName), backupFile); err != nil { return err } log.Infof("Ansible inventory renamed to %s.", color.HiCyanString(backupFile)) } else { // move original TiDB-Ansible directory to a staged location if err = utils.Move(ansibleDir, backupDir); err != nil { return err } log.Infof("Ansible inventory saved in %s.", color.HiCyanString(backupDir)) } log.Infof("Cluster %s imported.", clsName) fmt.Printf("Try `%s` to show node list and status of the cluster.\n", color.HiYellowString("%s display %s", tui.OsArgs0(), clsName)) return nil }, } cmd.Flags().StringVarP(&ansibleDir, "dir", "d", "", "The path to TiDB-Ansible directory") cmd.Flags().StringVar(&inventoryFileName, "inventory", ansible.AnsibleInventoryFile, "The 
name of inventory file") cmd.Flags().StringVar(&ansibleCfgFile, "ansible-config", ansible.AnsibleConfigFile, "The path to ansible.cfg") cmd.Flags().StringVarP(&rename, "rename", "r", "", "Rename the imported cluster to `NAME`") cmd.Flags().BoolVar(&noBackup, "no-backup", false, "Don't backup ansible dir, useful when there're multiple inventory files") return cmd } tiup-1.16.3/components/cluster/command/list.go000066400000000000000000000014151505422223000213450ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "List all clusters", RunE: func(cmd *cobra.Command, args []string) error { return cm.ListCluster() }, } return cmd } tiup-1.16.3/components/cluster/command/meta.go000066400000000000000000000035361505422223000213260ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "fmt" "strings" "time" "github.com/spf13/cobra" ) func newMetaCmd() *cobra.Command { cmd := &cobra.Command{ Use: "meta", Short: "backup/restore meta information", } var filePath string var metaBackupCmd = &cobra.Command{ Use: "backup ", Short: "backup topology and other information of cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return fmt.Errorf("please input cluster-name") } if filePath == "" { filePath = "tiup-cluster_" + args[0] + "_metabackup_" + strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "-") + ".tar.gz" } err := cm.BackupClusterMeta(args[0], filePath) if err == nil { log.Infof("successfully backup meta of cluster %s on %s", args[0], filePath) } return err }, } metaBackupCmd.Flags().StringVar(&filePath, "file", "", "filepath of output tarball") var metaRestoreCmd = &cobra.Command{ Use: "restore ", Short: "restore topology and other information of cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return fmt.Errorf("please input cluster-name and backup-file") } return cm.RestoreClusterMeta(args[0], args[1], skipConfirm) }, } cmd.AddCommand(metaBackupCmd) cmd.AddCommand(metaRestoreCmd) return cmd } tiup-1.16.3/components/cluster/command/patch.go000066400000000000000000000034251505422223000214740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( perrs "github.com/pingcap/errors" "github.com/spf13/cobra" ) func newPatchCmd() *cobra.Command { var ( overwrite bool offlineMode bool ) cmd := &cobra.Command{ Use: "patch ", Short: "Replace the remote package with a specified package and restart the service", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } if len(gOpt.Nodes) == 0 && len(gOpt.Roles) == 0 { return perrs.New("the flag -R or -N must be specified at least one") } clusterName := args[0] return cm.Patch(clusterName, args[1], gOpt, overwrite, offlineMode, skipConfirm) }, } cmd.Flags().BoolVar(&overwrite, "overwrite", false, "Use this package in the future scale-out operations") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Specify the nodes") cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Specify the roles") cmd.Flags().Uint64Var(&gOpt.APITimeout, "transfer-timeout", 600, "Timeout in seconds when transferring PD and TiKV store leaders, also for TiCDC drain one capture") cmd.Flags().BoolVarP(&offlineMode, "offline", "", false, "Patch a stopped cluster") return cmd } tiup-1.16.3/components/cluster/command/prune.go000066400000000000000000000025041505422223000215230ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/spf13/cobra" ) func newPruneCmd() *cobra.Command { cmd := &cobra.Command{ Use: "prune ", Short: "Destroy and remove instances that is in tombstone state", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.DestroyTombstone(clusterName, gOpt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Ignore errors when deleting the instance with data from the cluster") return cmd } tiup-1.16.3/components/cluster/command/reload.go000066400000000000000000000054401505422223000216420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "slices" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/spf13/cobra" ) func newReloadCmd() *cobra.Command { var skipRestart bool cmd := &cobra.Command{ Use: "reload ", Short: "Reload a TiDB cluster's config and restart if needed", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } clusterName := args[0] return cm.Reload(clusterName, gOpt, skipRestart, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Force reload without transferring PD leader and ignore remote error") cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only reload specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only reload specified nodes") cmd.Flags().Uint64Var(&gOpt.APITimeout, "transfer-timeout", 600, "Timeout in seconds when transferring PD and TiKV store leaders, also for TiCDC drain one capture") cmd.Flags().BoolVarP(&gOpt.IgnoreConfigCheck, "ignore-config-check", "", false, "Ignore the config check result") cmd.Flags().BoolVar(&skipRestart, "skip-restart", false, "Only refresh configuration to remote and do not restart services") cmd.Flags().StringVar(&gOpt.SSHCustomScripts.BeforeRestartInstance.Raw, "pre-restart-script", "", "Custom script to be executed on each server before the service is restarted, does not take effect when --skip-restart is set to true") cmd.Flags().StringVar(&gOpt.SSHCustomScripts.AfterRestartInstance.Raw, "post-restart-script", "", "Custom script to be executed on each server after the service is restarted, does not take effect when --skip-restart is set to true") return cmd } func validRoles(roles 
[]string) error { for _, r := range roles { match := slices.Contains(spec.AllComponentNames(), r) if !match { return perrs.Errorf("not valid role: %s, should be one of: %v", r, spec.AllComponentNames()) } } return nil } tiup-1.16.3/components/cluster/command/rename.go000066400000000000000000000020321505422223000216350ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newRenameCmd() *cobra.Command { cmd := &cobra.Command{ Use: "rename ", Short: "Rename the cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } oldClusterName := args[0] newClusterName := args[1] return cm.Rename(oldClusterName, gOpt, newClusterName, skipConfirm) }, } return cmd } tiup-1.16.3/components/cluster/command/replay.go000066400000000000000000000032641505422223000216720ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "fmt" "path" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/audit" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/tui" "github.com/spf13/cobra" ) func newReplayCmd() *cobra.Command { cmd := &cobra.Command{ Use: "replay ", Short: "Replay previous operation and skip successed steps", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } file := path.Join(spec.AuditDir(), args[0]) if !checkpoint.HasCheckPoint() { if err := checkpoint.SetCheckPoint(file); err != nil { return errors.Annotate(err, "set checkpoint failed") } } args, err := audit.CommandArgs(file) if err != nil { return errors.Annotate(err, "read audit log failed") } if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("Will replay the command `tiup cluster %s`\nDo you want to continue? [y/N]: ", strings.Join(args[1:], " ")), ); err != nil { return err } } rootCmd.SetArgs(args[1:]) return rootCmd.Execute() }, } return cmd } tiup-1.16.3/components/cluster/command/restart.go000066400000000000000000000026531505422223000220630ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/spf13/cobra" ) func newRestartCmd() *cobra.Command { cmd := &cobra.Command{ Use: "restart ", Short: "Restart a TiDB cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } clusterName := args[0] return cm.RestartCluster(clusterName, gOpt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only restart specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only restart specified nodes") return cmd } tiup-1.16.3/components/cluster/command/root.go000066400000000000000000000225151505422223000213610ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "encoding/json" "fmt" "os" "path" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/cluster/manager" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" tiupmeta "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/logger" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/version" "github.com/spf13/cobra" "go.uber.org/zap" ) var ( errNS = errorx.NewNamespace("cmd") rootCmd *cobra.Command gOpt operator.Options skipConfirm bool log = logprinter.NewLogger("") // init default logger ) var ( tidbSpec *spec.SpecManager cm *manager.Manager ) func init() { logger.InitGlobalLogger() tui.AddColorFunctionsForCobra() cobra.EnableCommandSorting = false nativeEnvVar := strings.ToLower(os.Getenv(localdata.EnvNameNativeSSHClient)) if nativeEnvVar == "true" || nativeEnvVar == "1" || nativeEnvVar == "enable" { gOpt.NativeSSH = true } rootCmd = &cobra.Command{ Use: tui.OsArgs0(), Short: "Deploy a TiDB cluster for production", SilenceUsage: true, SilenceErrors: true, Version: version.NewTiUPVersion().String(), PersistentPreRunE: func(cmd *cobra.Command, args []string) error { // populate logger log.SetDisplayModeFromString(gOpt.DisplayMode) var err error var env *tiupmeta.Environment if err = spec.Initialize("cluster"); err != nil { return err } tidbSpec = spec.GetSpecManager() cm = manager.NewManager("tidb", tidbSpec, log) if cmd.Name() != "__complete" { logger.EnableAuditLog(spec.AuditDir()) } // Running in other OS/ARCH Should be fine we only download manifest file. 
env, err = tiupmeta.InitEnv(repository.Options{ GOOS: "linux", GOARCH: "amd64", }, repository.MirrorOptions{}) if err != nil { return err } tiupmeta.SetGlobalEnv(env) if gOpt.NativeSSH { gOpt.SSHType = executor.SSHTypeSystem log.Infof( "System ssh client will be used (%s=%s)", localdata.EnvNameNativeSSHClient, os.Getenv(localdata.EnvNameNativeSSHClient)) log.Infof("The --native-ssh flag has been deprecated, please use --ssh=system") } err = proxy.MaybeStartProxy( gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, gOpt.SSHProxyUsePassword, gOpt.SSHProxyIdentity, log, ) if err != nil { return perrs.Annotate(err, "start http-proxy") } return nil }, PersistentPostRunE: func(cmd *cobra.Command, args []string) error { proxy.MaybeStopProxy() return tiupmeta.GlobalEnv().V1Repository().Mirror().Close() }, } tui.BeautifyCobraUsageAndHelp(rootCmd) rootCmd.PersistentFlags().Uint64Var(&gOpt.SSHTimeout, "ssh-timeout", 5, "Timeout in seconds to connect host via SSH, ignored for operations that don't need an SSH connection.") // the value of wait-timeout is also used for `systemctl` commands, as the default timeout of systemd for // start/stop operations is 90s, the default value of this argument is better be longer than that rootCmd.PersistentFlags().Uint64Var(&gOpt.OptTimeout, "wait-timeout", 120, "Timeout in seconds to wait for an operation to complete, ignored for operations that don't fit.") rootCmd.PersistentFlags().BoolVarP(&skipConfirm, "yes", "y", false, "Skip all confirmations and assumes 'yes'") rootCmd.PersistentFlags().BoolVar(&gOpt.NativeSSH, "native-ssh", gOpt.NativeSSH, "(EXPERIMENTAL) Use the native SSH client installed on local system instead of the built-in one.") rootCmd.PersistentFlags().StringVar((*string)(&gOpt.SSHType), "ssh", "", "(EXPERIMENTAL) The executor type: 'builtin', 'system', 'none' (default \"builtin\").") rootCmd.PersistentFlags().IntVarP(&gOpt.Concurrency, "concurrency", "c", 5, "max number of parallel tasks allowed") 
rootCmd.PersistentFlags().StringVar(&gOpt.DisplayMode, "format", "default", "(EXPERIMENTAL) The format of output, available values are [default, json]") rootCmd.PersistentFlags().StringVar(&gOpt.SSHProxyHost, "ssh-proxy-host", "", "The SSH proxy host used to connect to remote host.") rootCmd.PersistentFlags().StringVar(&gOpt.SSHProxyUser, "ssh-proxy-user", utils.CurrentUser(), "The user name used to login the proxy host.") rootCmd.PersistentFlags().IntVar(&gOpt.SSHProxyPort, "ssh-proxy-port", 22, "The port used to login the proxy host.") rootCmd.PersistentFlags().StringVar(&gOpt.SSHProxyIdentity, "ssh-proxy-identity-file", path.Join(utils.UserHome(), ".ssh", "id_rsa"), "The identity file used to login the proxy host.") rootCmd.PersistentFlags().BoolVar(&gOpt.SSHProxyUsePassword, "ssh-proxy-use-password", false, "Use password to login the proxy host.") rootCmd.PersistentFlags().Uint64Var(&gOpt.SSHProxyTimeout, "ssh-proxy-timeout", 5, "Timeout in seconds to connect the proxy host via SSH, ignored for operations that don't need an SSH connection.") _ = rootCmd.PersistentFlags().MarkHidden("native-ssh") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-host") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-user") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-port") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-identity-file") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-use-password") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-timeout") rootCmd.AddCommand( newCheckCmd(), newDeploy(), newStartCmd(), newStopCmd(), newRestartCmd(), newScaleInCmd(), newScaleOutCmd(), newDestroyCmd(), newCleanCmd(), newUpgradeCmd(), newDisplayCmd(), newPruneCmd(), newListCmd(), newAuditCmd(), newImportCmd(), newEditConfigCmd(), newShowConfigCmd(), newReloadCmd(), newPatchCmd(), newRenameCmd(), newEnableCmd(), newDisableCmd(), newExecCmd(), newPullCmd(), newPushCmd(), newTestCmd(), // hidden command for test internally newReplayCmd(), newTemplateCmd(), 
newTLSCmd(), newMetaCmd(), newRotateSSHCmd(), ) } func printErrorMessageForNormalError(err error) { _, _ = tui.ColorErrorMsg.Fprintf(os.Stderr, "\nError: %s\n", err.Error()) } func printErrorMessageForErrorX(err *errorx.Error) { msg := "" ident := 0 causeErrX := err for causeErrX != nil { if ident > 0 { msg += strings.Repeat(" ", ident) + "caused by: " } currentErrMsg := causeErrX.Message() if len(currentErrMsg) > 0 { if ident == 0 { // Print error code only for top level error msg += fmt.Sprintf("%s (%s)\n", currentErrMsg, causeErrX.Type().FullName()) } else { msg += fmt.Sprintf("%s\n", currentErrMsg) } ident++ } cause := causeErrX.Cause() if c := errorx.Cast(cause); c != nil { causeErrX = c } else { if cause != nil { if ident > 0 { // The error may have empty message. In this case we treat it as a transparent error. // Thus `ident == 0` can be possible. msg += strings.Repeat(" ", ident) + "caused by: " } msg += fmt.Sprintf("%s\n", cause.Error()) } break } } _, _ = tui.ColorErrorMsg.Fprintf(os.Stderr, "\nError: %s", msg) } func extractSuggestionFromErrorX(err *errorx.Error) string { cause := err for cause != nil { v, ok := cause.Property(utils.ErrPropSuggestion) if ok { if s, ok := v.(string); ok { return s } } cause = errorx.Cast(cause.Cause()) } return "" } // Execute executes the root command func Execute() { zap.L().Info("Execute command", zap.String("command", tui.OsArgs())) zap.L().Debug("Environment variables", zap.Strings("env", os.Environ())) code := 0 err := rootCmd.Execute() if err != nil { code = 1 } zap.L().Info("Execute command finished", zap.Int("code", code), zap.Error(err)) switch log.GetDisplayMode() { case logprinter.DisplayModeJSON: obj := struct { Code int `json:"exit_code"` Err string `json:"error,omitempty"` }{ Code: code, } if err != nil { obj.Err = err.Error() } data, err := json.Marshal(obj) if err != nil { fmt.Printf("{\"exit_code\":%d, \"error\":\"%s\"}", code, err) } fmt.Fprintln(os.Stderr, string(data)) default: if err != nil { if 
errx := errorx.Cast(err); errx != nil { printErrorMessageForErrorX(errx) } else { printErrorMessageForNormalError(err) } if !errorx.HasTrait(err, utils.ErrTraitPreCheck) { logger.OutputDebugLog("tiup-cluster") } if errx := errorx.Cast(err); errx != nil { if suggestion := extractSuggestionFromErrorX(errx); len(suggestion) > 0 { log.Errorf("\n%s\n", suggestion) } } } } err = logger.OutputAuditLogIfEnabled() if err != nil { zap.L().Warn("Write audit log file failed", zap.Error(err)) code = 1 } color.Unset() if code != 0 { os.Exit(code) } } tiup-1.16.3/components/cluster/command/rotate_ssh.go000066400000000000000000000016341505422223000225500ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newRotateSSHCmd() *cobra.Command { cmd := &cobra.Command{ Use: "rotatessh ", Short: "rotate ssh keys on all nodes", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.RotateSSH(clusterName, gOpt, skipConfirm) }, } return cmd } tiup-1.16.3/components/cluster/command/scale_in.go000066400000000000000000000043041505422223000221470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "crypto/tls" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/spf13/cobra" ) func newScaleInCmd() *cobra.Command { cmd := &cobra.Command{ Use: "scale-in ", Short: "Scale in a TiDB cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] scale := func(b *task.Builder, imetadata spec.Metadata, tlsCfg *tls.Config) { metadata := imetadata.(*spec.ClusterMeta) nodes := gOpt.Nodes if !gOpt.Force { nodes = operator.AsyncNodes(metadata.Topology, nodes, false) } b.ClusterOperate(metadata.Topology, operator.ScaleInOperation, gOpt, tlsCfg). UpdateMeta(clusterName, metadata, nodes). 
UpdateTopology(clusterName, tidbSpec.Path(clusterName), metadata, nodes) } return cm.ScaleIn(clusterName, skipConfirm, gOpt, scale) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Specify the nodes (required)") cmd.Flags().Uint64Var(&gOpt.APITimeout, "transfer-timeout", 600, "Timeout in seconds when transferring PD and TiKV store leaders, also for TiCDC drain one capture") cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Force just try stop and destroy instance before removing the instance from topo") _ = cmd.MarkFlagRequired("node") return cmd } tiup-1.16.3/components/cluster/command/scale_out.go000066400000000000000000000065071505422223000223570ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "path/filepath" "github.com/pingcap/tiup/pkg/cluster/manager" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newScaleOutCmd() *cobra.Command { opt := manager.DeployOptions{ IdentityFile: filepath.Join(utils.UserHome(), ".ssh", "id_rsa"), } cmd := &cobra.Command{ Use: "scale-out [topology.yaml]", Short: "Scale out a TiDB cluster", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { var ( clusterName string topoFile string ) // tiup cluster scale-out --stage1 --stage2 // is equivalent to // tiup cluster scale-out if opt.Stage1 && opt.Stage2 { opt.Stage1 = false opt.Stage2 = false } if opt.Stage2 && len(args) == 1 { clusterName = args[0] } else { if len(args) != 2 { return cmd.Help() } clusterName = args[0] topoFile = args[1] } return cm.ScaleOut( clusterName, topoFile, postScaleOutHook, final, opt, skipConfirm, gOpt, ) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) case 1: return nil, cobra.ShellCompDirectiveDefault default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVarP(&opt.User, "user", "u", utils.CurrentUser(), "The user name to login via SSH. The user must has root (or sudo) privilege.") cmd.Flags().BoolVarP(&opt.SkipCreateUser, "skip-create-user", "", false, "(EXPERIMENTAL) Skip creating the user specified in topology.") cmd.Flags().StringVarP(&opt.IdentityFile, "identity_file", "i", opt.IdentityFile, "The path of the SSH identity file. If specified, public key authentication will be used.") cmd.Flags().BoolVarP(&opt.UsePassword, "password", "p", false, "Use password of target hosts. 
If specified, password authentication will be used.") cmd.Flags().BoolVarP(&opt.NoLabels, "no-labels", "", false, "Don't check TiKV labels") cmd.Flags().BoolVarP(&opt.Stage1, "stage1", "", false, "Don't start the new instance after scale-out, need to manually execute cluster scale-out --stage2") cmd.Flags().BoolVarP(&opt.Stage2, "stage2", "", false, "Start the new instance and init config after scale-out --stage1") return cmd } func final(builder *task.Builder, name string, meta spec.Metadata, gOpt operator.Options) { builder.UpdateTopology(name, tidbSpec.Path(name), meta.(*spec.ClusterMeta), nil, /* deleteNodeIds */ ) } func postScaleOutHook(builder *task.Builder, newPart spec.Topology, gOpt operator.Options) { postDeployHook(builder, newPart, gOpt) } tiup-1.16.3/components/cluster/command/show_config.go000066400000000000000000000022341505422223000226770ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/spf13/cobra" ) func newShowConfigCmd() *cobra.Command { cmd := &cobra.Command{ Use: "show-config ", Short: "Show TiDB cluster config", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.ShowConfig(clusterName) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } return cmd } tiup-1.16.3/components/cluster/command/start.go000066400000000000000000000102351505422223000215270ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "database/sql" "fmt" "strings" "github.com/fatih/color" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/crypto/rand" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" // for sql/driver _ "github.com/go-sql-driver/mysql" ) func newStartCmd() *cobra.Command { var ( initPasswd bool restoreLeader bool ) cmd := &cobra.Command{ Use: "start ", Short: "Start a TiDB cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } clusterName := args[0] if err := cm.StartCluster(clusterName, gOpt, restoreLeader, func(b *task.Builder, metadata spec.Metadata) { b.UpdateTopology( clusterName, tidbSpec.Path(clusterName), metadata.(*spec.ClusterMeta), nil, /* deleteNodeIds */ ) }); err != nil { return err } // init password if initPasswd { pwd, err := initPassword(clusterName) if err != nil { log.Errorf("Failed to set root password of TiDB database to '%s'", pwd) if strings.Contains(strings.ToLower(err.Error()), "error 1045") { log.Errorf("Initializing is only working when the root password is empty") log.Errorf("%s", color.YellowString("Did you already set root password before?")) } return err } log.Warnf("The root password of TiDB database has been changed.") fmt.Printf("The new password is: '%s'.\n", color.HiYellowString(pwd)) // use fmt to avoid printing to audit log log.Warnf("Copy and record it to somewhere safe, %s, and will not be stored.", color.HiRedString("it is only displayed once")) log.Warnf("The generated password %s.", color.HiRedString("can NOT be get and shown again")) } return nil }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } 
}, } cmd.Flags().BoolVar(&initPasswd, "init", false, "Initialize a secure root password for the database") cmd.Flags().BoolVar(&restoreLeader, "restore-leaders", false, "Allow leaders to be scheduled to stores after start") cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only start specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only start specified nodes") _ = cmd.Flags().MarkHidden("restore-leaders") return cmd } func initPassword(clusterName string) (string, error) { metadata, err := spec.ClusterMetadata(clusterName) if err != nil { return "", err } tcpProxy := proxy.GetTCPProxy() // generate password pwd, err := rand.Password(18) if err != nil { return pwd, err } var lastErr error for _, spec := range metadata.Topology.TiDBServers { endpoint := utils.JoinHostPort(spec.Host, spec.Port) if tcpProxy != nil { closeC := tcpProxy.Run([]string{endpoint}) defer tcpProxy.Close(closeC) endpoint = tcpProxy.GetEndpoints()[0] } db, err := createDB(endpoint) if err != nil { lastErr = err continue } defer db.Close() sqlStr := fmt.Sprintf("SET PASSWORD FOR 'root'@'%%' = '%s'; FLUSH PRIVILEGES;", pwd) _, err = db.Exec(sqlStr) if err != nil { lastErr = err continue } return pwd, nil } return pwd, lastErr } func createDB(endpoint string) (db *sql.DB, err error) { dsn := fmt.Sprintf("root:@tcp(%s)/?charset=utf8mb4,utf8&multiStatements=true", endpoint) db, err = sql.Open("mysql", dsn) return } tiup-1.16.3/components/cluster/command/stop.go000066400000000000000000000031151505422223000213560ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newStopCmd() *cobra.Command { var evictLeader bool cmd := &cobra.Command{ Use: "stop ", Short: "Stop a TiDB cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } clusterName := args[0] return cm.StopCluster(clusterName, gOpt, skipConfirm, evictLeader) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only stop specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only stop specified nodes") cmd.Flags().BoolVar(&evictLeader, "evict-leaders", false, "Evict leaders on stores before stop") _ = cmd.Flags().MarkHidden("evict-leaders") return cmd } tiup-1.16.3/components/cluster/command/template.go000066400000000000000000000126741505422223000222160ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "bytes" "fmt" "path" "text/template" "github.com/pingcap/errors" "github.com/pingcap/tiup/embed" "github.com/spf13/cobra" ) // TemplateOptions contains the options for print topology template. type TemplateOptions struct { Full bool // print full template MultiDC bool // print template for deploying to multiple data center Local bool // print and render local template } // LocalTemplate contains the variables for print local template. type LocalTemplate struct { GlobalUser string // global.user in yaml template GlobalGroup string // global.group in yaml template GlobalSystemdMode string // global.systemd_mode in yaml template GlobalSSHPort int // global.ssh_port in yaml template GlobalDeployDir string // global.deploy_dir in yaml template GlobalDataDir string // global.data_dir in yaml template GlobalArch string // global.arch in yaml template PDServers []string // pd_servers in yaml template TiDBServers []string // tidb_servers in yaml template TiKVServers []string // tikv_servers in yaml template TiFlashServers []string // tiflash_servers in yaml template MonitoringServers []string // monitoring_servers in yaml template GrafanaServers []string // grafana_servers in yaml template AlertManagerServers []string // alertmanager_servers in yaml template } // This is used to identify how many bool type options are set, so that an // error can be throw if more than one is given. 
func sumBool(b ...bool) int { n := 0 for _, v := range b { if v { n++ } } return n } func newTemplateCmd() *cobra.Command { opt := TemplateOptions{} localOpt := LocalTemplate{} cmd := &cobra.Command{ Use: "template", Short: "Print topology template", RunE: func(cmd *cobra.Command, args []string) error { if sumBool(opt.Full, opt.MultiDC, opt.Local) > 1 { return errors.New("at most one of 'full', 'multi-dc', or 'local' can be specified") } name := "minimal.yaml" switch { case opt.Full: name = "topology.example.yaml" case opt.MultiDC: name = "multi-dc.yaml" case opt.Local: name = "local.tpl" } fp := path.Join("examples", "cluster", name) tpl, err := embed.ReadExample(fp) if err != nil { return err } if !opt.Local { // print example yaml and return fmt.Fprintln(cmd.OutOrStdout(), string(tpl)) return nil } // redner template // validate arch if localOpt.GlobalArch != "amd64" && localOpt.GlobalArch != "arm64" { return fmt.Errorf(`supported values are "amd64" or "arm64" in global.arch`) } tmpl, err := template.New(name).Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, &localOpt); err != nil { return err } fmt.Fprintln(cmd.OutOrStdout(), content.String()) return nil }, } cmd.Flags().BoolVar(&opt.Full, "full", false, "Print the full topology template for TiDB cluster.") cmd.Flags().BoolVar(&opt.MultiDC, "multi-dc", false, "Print template for deploying to multiple data center.") cmd.Flags().BoolVar(&opt.Local, "local", false, "Print and render template for deploying a simple cluster locally.") // template values for rendering cmd.Flags().StringVar(&localOpt.GlobalUser, "user", "tidb", "The user who runs the tidb cluster.") cmd.Flags().StringVar(&localOpt.GlobalGroup, "group", "", "group is used to specify the group name the user belong to if it's not the same as user.") cmd.Flags().StringVar(&localOpt.GlobalSystemdMode, "systemd_mode", "system", "systemd_mode is used to select whether to use sudo 
permissions.") cmd.Flags().IntVar(&localOpt.GlobalSSHPort, "ssh-port", 22, "SSH port of servers in the managed cluster.") cmd.Flags().StringVar(&localOpt.GlobalDeployDir, "deploy-dir", "/tidb-deploy", "Storage directory for cluster deployment files, startup scripts, and configuration files.") cmd.Flags().StringVar(&localOpt.GlobalDataDir, "data-dir", "/tidb-data", "TiDB Cluster data storage directory.") cmd.Flags().StringVar(&localOpt.GlobalArch, "arch", "amd64", "Supported values: \"amd64\", \"arm64\".") cmd.Flags().StringSliceVar(&localOpt.PDServers, "pd-servers", []string{"127.0.0.1"}, "List of PD servers") cmd.Flags().StringSliceVar(&localOpt.TiDBServers, "tidb-servers", []string{"127.0.0.1"}, "List of TiDB servers") cmd.Flags().StringSliceVar(&localOpt.TiKVServers, "tikv-servers", []string{"127.0.0.1"}, "List of TiKV servers") cmd.Flags().StringSliceVar(&localOpt.TiFlashServers, "tiflash-servers", nil, "List of TiFlash servers") cmd.Flags().StringSliceVar(&localOpt.MonitoringServers, "monitoring-servers", []string{"127.0.0.1"}, "List of monitor servers") cmd.Flags().StringSliceVar(&localOpt.GrafanaServers, "grafana-servers", []string{"127.0.0.1"}, "List of grafana servers") cmd.Flags().StringSliceVar(&localOpt.AlertManagerServers, "alertmanager-servers", nil, "List of alermanager servers") return cmd } tiup-1.16.3/components/cluster/command/template_test.go000066400000000000000000000077231505422223000232540ustar00rootroot00000000000000package command import ( "bytes" "io" "strings" "testing" ) func Test_TemplateLocalCommandSingle(t *testing.T) { tests := []struct { optKey string optVal string expected string }{ {"user", "ubuntu", "user: \"ubuntu\""}, {"group", "ubuntu", "group: \"ubuntu\""}, {"ssh-port", "2222", "ssh_port: 2222"}, {"deploy-dir", "/path/to/deploy", "deploy_dir: \"/path/to/deploy\""}, {"data-dir", "/path/to/data", "data_dir: \"/path/to/data\""}, {"arch", "arm64", "arch: \"arm64\""}, {"pd-servers", "a,b,c", "pd_servers:\n - host: a\n - host: b\n 
- host: c"}, {"tidb-servers", "a,b,c", "tidb_servers:\n - host: a\n - host: b\n - host: c"}, {"tikv-servers", "a,b,c", "tikv_servers:\n - host: a\n - host: b\n - host: c"}, {"tiflash-servers", "a,b,c", "tiflash_servers:\n - host: a\n - host: b\n - host: c"}, {"monitoring-servers", "a,b,c", "monitoring_servers:\n - host: a\n - host: b\n - host: c"}, {"grafana-servers", "a,b,c", "grafana_servers:\n - host: a\n - host: b\n - host: c"}, {"alertmanager-servers", "a,b,c", "alertmanager_servers:\n - host: a\n - host: b\n - host: c"}, } for _, test := range tests { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local _ = cmd.Flags().Set(test.optKey, test.optVal) if err := cmd.Execute(); err != nil { t.Fatal(err) } out, err := io.ReadAll(b) if err != nil { t.Fatal(err) } if !strings.Contains(string(out), test.expected) { t.Fatalf("expected \"%s\", got \"%s\"", test.expected, string(out)) } } } func Test_TemplateLocalCommandMulti(t *testing.T) { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local _ = cmd.Flags().Set("user", "ubuntu") // add --user=ubuntu _ = cmd.Flags().Set("group", "ubuntu") // add --group=ubuntu _ = cmd.Flags().Set("tidb-servers", "a,b,c") // add --tidb-servers=a,b,c _ = cmd.Flags().Set("alertmanager-servers", "a,b,c") // add --alertmanager-servers=a,b,c if err := cmd.Execute(); err != nil { t.Fatal(err) } out, err := io.ReadAll(b) if err != nil { t.Fatal(err) } for _, b := range []bool{ strings.Contains(string(out), "user: \"ubuntu\""), strings.Contains(string(out), "group: \"ubuntu\""), strings.Contains(string(out), "tidb_servers:\n - host: a\n - host: b\n - host: c"), strings.Contains(string(out), "alertmanager_servers:\n - host: a\n - host: b\n - host: c"), } { if !b { t.Fatalf("unexpected output. 
got \"%s\"", string(out)) } } } func Test_TemplateLocalCommandNoopt(t *testing.T) { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local if err := cmd.Execute(); err != nil { t.Fatal(err) } out, err := io.ReadAll(b) if err != nil { t.Fatal(err) } // check default output for _, b := range []bool{ strings.Contains(string(out), "user: \"tidb\""), strings.Contains(string(out), "ssh_port: 22"), strings.Contains(string(out), "deploy_dir: \"/tidb-deploy\""), strings.Contains(string(out), "data_dir: \"/tidb-data\""), strings.Contains(string(out), "arch: \"amd64\""), strings.Contains(string(out), "pd_servers:\n - host: 127.0.0.1"), strings.Contains(string(out), "tidb_servers:\n - host: 127.0.0.1"), strings.Contains(string(out), "tikv_servers:\n - host: 127.0.0.1"), strings.Contains(string(out), "monitoring_servers:\n - host: 127.0.0.1"), strings.Contains(string(out), "grafana_servers:\n - host: 127.0.0.1"), } { if !b { t.Fatalf("unexpected output. got \"%s\"", string(out)) } } } func Test_TemplateLocalCommandValidate(t *testing.T) { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local _ = cmd.Flags().Set("arch", "i386") // add --arch=i386 (invalid) // should returns err if err := cmd.Execute(); err == nil { t.Fatal(err) } } tiup-1.16.3/components/cluster/command/test.go000066400000000000000000000066221505422223000213560ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "context" "errors" "fmt" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" // for sql/driver _ "github.com/go-sql-driver/mysql" ) func newTestCmd() *cobra.Command { cmd := &cobra.Command{ Use: "_test ", Short: "test toolkit", Hidden: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) < 1 { return cmd.Help() } clusterName := args[0] exist, err := tidbSpec.Exist(clusterName) if err != nil { return err } if !exist { return perrs.Errorf("cannot start non-exists cluster %s", clusterName) } metadata, err := spec.ClusterMetadata(clusterName) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } tcpProxy := proxy.GetTCPProxy() switch args[1] { case "writable": return writable(metadata.Topology, tcpProxy) case "data": return data(metadata.Topology, tcpProxy) default: fmt.Println("unknown command: ", args[1]) return cmd.Help() } }, } return cmd } // To check if test.ti_cluster has data func data(topo *spec.Specification, tcpProxy *proxy.TCPProxy) error { errg, _ := errgroup.WithContext(context.Background()) for _, spec := range topo.TiDBServers { endpoint := utils.JoinHostPort(spec.Host, spec.Port) errg.Go(func() error { if tcpProxy != nil { closeC := tcpProxy.Run([]string{endpoint}) defer tcpProxy.Close(closeC) endpoint = tcpProxy.GetEndpoints()[0] } db, err := createDB(endpoint) if err != nil { return err } row := db.QueryRow("select count(*) from test.ti_cluster") count := 0 if err 
:= row.Scan(&count); err != nil { return err } if count == 0 { return errors.New("table test.ti_cluster is empty") } fmt.Printf("check data %s success\n", utils.JoinHostPort(spec.Host, spec.Port)) return nil }) } return errg.Wait() } func writable(topo *spec.Specification, tcpProxy *proxy.TCPProxy) error { errg, _ := errgroup.WithContext(context.Background()) for _, spec := range topo.TiDBServers { endpoint := utils.JoinHostPort(spec.Host, spec.Port) errg.Go(func() error { if tcpProxy != nil { closeC := tcpProxy.Run([]string{endpoint}) defer tcpProxy.Close(closeC) endpoint = tcpProxy.GetEndpoints()[0] } db, err := createDB(endpoint) if err != nil { return err } _, err = db.Exec("create table if not exists test.ti_cluster(id int AUTO_INCREMENT primary key, v int)") if err != nil { return err } _, err = db.Exec("insert into test.ti_cluster (v) values(1)") if err != nil { return err } fmt.Printf("write %s success\n", utils.JoinHostPort(spec.Host, spec.Port)) return nil }) } return errg.Wait() } tiup-1.16.3/components/cluster/command/tls.go000066400000000000000000000041551505422223000212000ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "strings" perrs "github.com/pingcap/errors" "github.com/spf13/cobra" ) func newTLSCmd() *cobra.Command { var ( reloadCertificate bool // reload certificate when the cluster enable encrypted communication cleanCertificate bool // cleanup certificate when the cluster disable encrypted communication enableTLS bool ) cmd := &cobra.Command{ Use: "tls ", Short: "Enable/Disable TLS between TiDB components", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } clusterName := args[0] switch strings.ToLower(args[1]) { case "enable": enableTLS = true case "disable": enableTLS = false default: return perrs.New("enable or disable must be specified at least one") } if enableTLS && cleanCertificate { return perrs.New("clean-certificate only works when tls disable") } if !enableTLS && reloadCertificate { return perrs.New("reload-certificate only works when tls enable") } return cm.TLS(clusterName, gOpt, enableTLS, cleanCertificate, reloadCertificate, skipConfirm) }, } cmd.Flags().BoolVar(&cleanCertificate, "clean-certificate", false, "Cleanup the certificate file if it already exists when tls disable") cmd.Flags().BoolVar(&reloadCertificate, "reload-certificate", false, "Load the certificate file whether it exists or not when tls enable") cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Force enable/disable tls regardless of the current state") return cmd } tiup-1.16.3/components/cluster/command/transfer.go000066400000000000000000000051161505422223000222200ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/spf13/cobra" ) /* Add a pair of adb like commands to transfer files to or from remote servers. Not using `scp` as the real implementation is not necessarily SSH, not using `transfer` all-in-one command to get rid of complex checking of wheather a path is remote or local, as this is supposed to be only a tiny helper utility. */ func newPullCmd() *cobra.Command { opt := manager.TransferOptions{Pull: true} cmd := &cobra.Command{ Use: "pull ", Short: "(EXPERIMENTAL) Transfer files or directories from host in the tidb cluster to local", Hidden: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 3 { return cmd.Help() } clusterName := args[0] opt.Remote = args[1] opt.Local = args[2] return cm.Transfer(clusterName, opt, gOpt) }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only exec on host with specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only exec on host with specified nodes") cmd.Flags().IntVarP(&opt.Limit, "limit", "l", 0, "Limits the used bandwidth, specified in Kbit/s") cmd.Flags().BoolVar(&opt.Compress, "compress", false, "Compression enable. 
Passes the -C flag to ssh(1) to enable compression.") return cmd } func newPushCmd() *cobra.Command { opt := manager.TransferOptions{Pull: false} cmd := &cobra.Command{ Use: "push ", Short: "(EXPERIMENTAL) Transfer files or directories from local to host in the tidb cluster", Hidden: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 3 { return cmd.Help() } clusterName := args[0] opt.Local = args[1] opt.Remote = args[2] return cm.Transfer(clusterName, opt, gOpt) }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only exec on host with specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only exec on host with specified nodes") return cmd } tiup-1.16.3/components/cluster/command/upgrade.go000066400000000000000000000121321505422223000220170ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "time" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newUpgradeCmd() *cobra.Command { offlineMode := false ignoreVersionCheck := false var tidbVer, tikvVer, pdVer, tsoVer, schedulingVer, tiflashVer, kvcdcVer, dashboardVer, cdcVer, alertmanagerVer, nodeExporterVer, blackboxExporterVer, tiproxyVer string var restartTimeout time.Duration cmd := &cobra.Command{ Use: "upgrade ", Short: "Upgrade a specified TiDB cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } clusterName := args[0] version, err := utils.FmtVer(args[1]) if err != nil { return err } componentVersions := map[string]string{ spec.ComponentDashboard: dashboardVer, spec.ComponentAlertmanager: alertmanagerVer, spec.ComponentTiDB: tidbVer, spec.ComponentTiKV: tikvVer, spec.ComponentPD: pdVer, spec.ComponentTSO: tsoVer, spec.ComponentScheduling: schedulingVer, spec.ComponentTiFlash: tiflashVer, spec.ComponentTiKVCDC: kvcdcVer, spec.ComponentCDC: cdcVer, spec.ComponentTiProxy: tiproxyVer, spec.ComponentBlackboxExporter: blackboxExporterVer, spec.ComponentNodeExporter: nodeExporterVer, } return cm.Upgrade(clusterName, version, componentVersions, gOpt, skipConfirm, offlineMode, ignoreVersionCheck, restartTimeout) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Force upgrade without transferring PD leader") cmd.Flags().Uint64Var(&gOpt.APITimeout, "transfer-timeout", 600, "Timeout in seconds when transferring PD and TiKV store leaders, also for TiCDC drain one capture") cmd.Flags().BoolVarP(&gOpt.IgnoreConfigCheck, "ignore-config-check", "", false, "Ignore the config check result") cmd.Flags().BoolVarP(&offlineMode, 
"offline", "", false, "Upgrade a stopped cluster") cmd.Flags().BoolVarP(&ignoreVersionCheck, "ignore-version-check", "", false, "Ignore checking if target version is bigger than current version") cmd.Flags().StringVar(&gOpt.SSHCustomScripts.BeforeRestartInstance.Raw, "pre-upgrade-script", "", "Custom script to be executed on each server before the server is upgraded") cmd.Flags().StringVar(&gOpt.SSHCustomScripts.AfterRestartInstance.Raw, "post-upgrade-script", "", "Custom script to be executed on each server after the server is upgraded") // cmd.Flags().StringVar(&tidbVer, "tidb-version", "", "Fix the version of tidb and no longer follows the cluster version.") cmd.Flags().StringVar(&tikvVer, "tikv-version", "", "Fix the version of tikv and no longer follows the cluster version.") cmd.Flags().StringVar(&pdVer, "pd-version", "", "Fix the version of pd and no longer follows the cluster version.") cmd.Flags().StringVar(&tsoVer, "tso-version", "", "Fix the version of tso and no longer follows the cluster version.") cmd.Flags().StringVar(&schedulingVer, "scheduling-version", "", "Fix the version of scheduling and no longer follows the cluster version.") cmd.Flags().StringVar(&tiflashVer, "tiflash-version", "", "Fix the version of tiflash and no longer follows the cluster version.") cmd.Flags().StringVar(&dashboardVer, "tidb-dashboard-version", "", "Fix the version of tidb-dashboard and no longer follows the cluster version.") cmd.Flags().StringVar(&cdcVer, "cdc-version", "", "Fix the version of cdc and no longer follows the cluster version.") cmd.Flags().StringVar(&kvcdcVer, "tikv-cdc-version", "", "Fix the version of tikv-cdc and no longer follows the cluster version.") cmd.Flags().StringVar(&alertmanagerVer, "alertmanager-version", "", "Fix the version of alertmanager and no longer follows the cluster version.") cmd.Flags().StringVar(&nodeExporterVer, "node-exporter-version", "", "Fix the version of node-exporter and no longer follows the cluster version.") 
cmd.Flags().StringVar(&blackboxExporterVer, "blackbox-exporter-version", "", "Fix the version of blackbox-exporter and no longer follows the cluster version.") cmd.Flags().StringVar(&tiproxyVer, "tiproxy-version", "", "Fix the version of tiproxy and no longer follows the cluster version.") cmd.Flags().DurationVar(&restartTimeout, "restart-timeout", time.Second*0, "Timeout for after upgrade prompt") return cmd } tiup-1.16.3/components/cluster/main.go000066400000000000000000000012731505422223000177020ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "github.com/pingcap/tiup/components/cluster/command" "github.com/pingcap/tiup/pkg/tui" ) func main() { tui.RegisterArg0("tiup cluster") command.Execute() } tiup-1.16.3/components/cluster/main_test.go000066400000000000000000000020751505422223000207420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "os" "strings" "testing" ) // To build: // see build_integration_test in Makefile // To run: // tiup-cluster.test -test.coverprofile={file} __DEVEL--i-heard-you-like-tests func TestMain(t *testing.T) { var ( args []string run bool ) for _, arg := range os.Args { switch { case arg == "__DEVEL--i-heard-you-like-tests": run = true case strings.HasPrefix(arg, "-test"): case strings.HasPrefix(arg, "__DEVEL"): default: args = append(args, arg) } } os.Args = args // fmt.Println(os.Args) if run { main() } } tiup-1.16.3/components/ctl/000077500000000000000000000000001505422223000155255ustar00rootroot00000000000000tiup-1.16.3/components/ctl/main.go000066400000000000000000000056151505422223000170070ustar00rootroot00000000000000package main import ( "errors" "fmt" "os" "os/exec" "path" "strings" "slices" "github.com/fatih/color" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func main() { if err := execute(); err != nil { os.Exit(1) } } func execute() error { ignoreVersion := false home := os.Getenv(localdata.EnvNameComponentInstallDir) if home == "" { return errors.New("component `ctl` cannot run in standalone mode") } rootCmd := &cobra.Command{ Use: "tiup ctl {tidb/pd/tikv/binlog/etcd/cdc/tidb-lightning}", Short: "TiDB controllers", SilenceUsage: true, FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true}, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return cmd.Help() } var transparentParams []string componentSpec := args[0] for i, arg := range os.Args { if arg == componentSpec { transparentParams = os.Args[i+1:] break } } if ignoreVersion { for i, arg := range transparentParams { if arg == "--ignore-version" { transparentParams = slices.Delete(transparentParams, i, i+1) } } } else if os.Getenv(localdata.EnvNameUserInputVersion) == "" { // if user not set component version explicitly return errors.New( "ctl needs an explicit version, please run with `tiup 
ctl:`, if you continue seeing this error, please upgrade your TiUP with `tiup update --self`", ) } bin, err := binaryPath(home, componentSpec) if err != nil { return err } return run(bin, transparentParams...) }, } originHelpFunc := rootCmd.HelpFunc() rootCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) { if len(args) < 2 { originHelpFunc(cmd, args) return } args = utils.RebuildArgs(args) bin, err := binaryPath(home, args[0]) if err != nil { fmt.Println(color.RedString("Error: %v", err)) return } if err := run(bin, args[1:]...); err != nil { fmt.Println(color.RedString("Error: %v", err)) } }) rootCmd.Flags().BoolVar(&ignoreVersion, "ignore-version", false, "Skip explicit version check") return rootCmd.Execute() } func binaryPath(home, cmd string) (string, error) { switch cmd { case "tidb", "tikv", "pd", "tidb-lightning": return path.Join(home, cmd+"-ctl"), nil case "binlog", "etcd": return path.Join(home, cmd+"ctl"), nil case "cdc": return path.Join(home, cmd+" cli"), nil default: return "", errors.New("ctl only supports tidb, tikv, pd, binlog, tidb-lightning, etcd and cdc currently") } } func run(name string, args ...string) error { os.Setenv("ETCDCTL_API", "3") // Handle `cdc cli` if strings.Contains(name, " ") { xs := strings.Split(name, " ") name = xs[0] args = append(xs[1:], args...) } cmd := exec.Command(name, args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr cmd.Stdin = os.Stdin return cmd.Run() } tiup-1.16.3/components/dm/000077500000000000000000000000001505422223000153435ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/000077500000000000000000000000001505422223000167605ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/import.go000066400000000000000000000422341505422223000206260ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ansible import ( "bufio" "bytes" "context" "fmt" "os" "path/filepath" "strconv" "strings" "time" "github.com/BurntSushi/toml" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/dm/spec" "github.com/pingcap/tiup/pkg/cluster/ansible" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/utils" "github.com/relex/aini" "gopkg.in/ini.v1" "gopkg.in/yaml.v3" ) // ref https://docs.ansible.com/ansible/latest/reference_appendices/config.html#the-configuration-file // Changes can be made and used in a configuration file which will be searched for in the following order: // // ANSIBLE_CONFIG (environment variable if set) // ansible.cfg (in the current directory) // ~/.ansible.cfg (in the home directory) // /etc/ansible/ansible.cfg func searchConfigFile(dir string) (fname string, err error) { // ANSIBLE_CONFIG (environment variable if set) if v := os.Getenv("ANSIBLE_CONFIG"); len(v) > 0 { return v, nil } // ansible.cfg (in the current directory) f := filepath.Join(dir, "ansible.cfg") if utils.IsExist(f) { return f, nil } // ~/.ansible.cfg (in the home directory) home, err := os.UserHomeDir() if err != nil { return "", errors.AddStack(err) } f = filepath.Join(home, ".ansible.cfg") if utils.IsExist(f) { return f, nil } // /etc/ansible/ansible.cfg f = "/etc/ansible/ansible.cfg" if utils.IsExist(f) { return f, nil } return "", errors.Errorf("can not found ansible.cfg, dir: %s", dir) } func readConfigFile(dir string) (file *ini.File, err error) { fname, err := searchConfigFile(dir) if err != nil { return nil, err } file, err = ini.Load(fname) if err != nil { return 
nil, errors.Annotatef(err, "failed to load ini: %s", fname) } return } func firstNonEmpty(ss ...string) string { for _, s := range ss { if s != "" { return s } } return "" } func getAbsPath(dir string, path string) string { if filepath.IsAbs(path) { return path } path = filepath.Join(dir, path) return path } // ExecutorGetter get the executor by host. type ExecutorGetter interface { Get(host string) (e ctxt.Executor) } // Importer used for import from ansible. // ref DM docs: https://docs.pingcap.com/zh/tidb-data-migration/dev/deploy-a-dm-cluster-using-ansible type Importer struct { dir string // ansible directory. inventoryFileName string sshType executor.SSHType sshTimeout uint64 // following vars parse from ansbile user string sources map[string]*SourceConfig // addr(ip:port) -> SourceConfig // only use for test. // when set, we use this executor instead of getting a truly one. testExecutorGetter ExecutorGetter } // NewImporter create an Importer. // @sshTimeout: set 0 to use a default value func NewImporter(ansibleDir, inventoryFileName string, sshType executor.SSHType, sshTimeout uint64) (*Importer, error) { dir, err := filepath.Abs(ansibleDir) if err != nil { return nil, errors.AddStack(err) } return &Importer{ dir: dir, inventoryFileName: inventoryFileName, sources: make(map[string]*SourceConfig), sshType: sshType, sshTimeout: sshTimeout, }, nil } func (im *Importer) getExecutor(host string, port int) (e ctxt.Executor, err error) { if im.testExecutorGetter != nil { return im.testExecutorGetter.Get(host), nil } keypath := ansible.SSHKeyPath() cfg := executor.SSHConfig{ Host: host, Port: port, User: im.user, KeyFile: keypath, Timeout: time.Second * time.Duration(im.sshTimeout), } e, err = executor.New(im.sshType, false, cfg) return } func (im *Importer) fetchFile(ctx context.Context, host string, port int, fname string) (data []byte, err error) { e, err := im.getExecutor(host, port) if err != nil { return nil, errors.Annotatef(err, "failed to get executor, 
target: %s", utils.JoinHostPort(host, port)) } tmp, err := os.MkdirTemp("", "tiup") if err != nil { return nil, errors.AddStack(err) } defer os.RemoveAll(tmp) tmp = filepath.Join(tmp, filepath.Base(fname)) err = e.Transfer(ctx, fname, tmp, true /*download*/, 0, false) if err != nil { return nil, errors.Annotatef(err, "transfer %s from %s", fname, utils.JoinHostPort(host, port)) } data, err = os.ReadFile(tmp) if err != nil { return nil, errors.AddStack(err) } return } func setConfig(config *map[string]any, k string, v any) { if *config == nil { *config = make(map[string]any) } (*config)[k] = v } // handleWorkerConfig fetch the config file of worker and generate the source // which we need for the master. func (im *Importer) handleWorkerConfig(ctx context.Context, srv *spec.WorkerSpec, fname string) error { data, err := im.fetchFile(ctx, srv.Host, srv.SSHPort, fname) if err != nil { return err } config := new(Config) err = toml.Unmarshal(data, config) if err != nil { return errors.AddStack(err) } source := config.ToSource() im.sources[srv.Host+":"+strconv.Itoa(srv.Port)] = source return nil } // ScpSourceToMaster scp the source files to master, // and set V1SourcePath of the master spec. 
func (im *Importer) ScpSourceToMaster(ctx context.Context, topo *spec.Specification) (err error) { for i := 0; i < len(topo.Masters); i++ { master := topo.Masters[i] target := filepath.Join(firstNonEmpty(master.DeployDir, topo.GlobalOptions.DeployDir), "v1source") master.V1SourcePath = target e, err := im.getExecutor(master.Host, master.SSHPort) if err != nil { return errors.Annotatef(err, "failed to get executor, target: %s", utils.JoinHostPort(master.Host, master.SSHPort)) } _, stderr, err := e.Execute(ctx, "mkdir -p "+target, false) if err != nil { return errors.Annotatef(err, "failed to execute: %s", string(stderr)) } for addr, source := range im.sources { f, err := os.CreateTemp("", "tiup-dm-*") if err != nil { return errors.AddStack(err) } data, err := yaml.Marshal(source) if err != nil { return errors.AddStack(err) } _, err = f.Write(data) if err != nil { return errors.AddStack(err) } err = e.Transfer(ctx, f.Name(), filepath.Join(target, addr+".yml"), false, 0, false) if err != nil { return err } } } return nil } func instancDeployDir(comp string, port int, hostDir string, globalDir string) string { if hostDir != globalDir { return filepath.Join(hostDir, fmt.Sprintf("%s-%d", comp, port)) } return "" } // ImportFromAnsibleDir generate the metadata from ansible deployed cluster. 
// //revive:disable func (im *Importer) ImportFromAnsibleDir(ctx context.Context) (clusterName string, meta *spec.Metadata, err error) { dir := im.dir inventoryFileName := im.inventoryFileName cfg, err := readConfigFile(dir) if err != nil { return "", nil, err } fname := filepath.Join(dir, inventoryFileName) file, err := os.Open(fname) if err != nil { return "", nil, errors.AddStack(err) } inventory, err := aini.Parse(file) if err != nil { return "", nil, errors.AddStack(err) } meta = &spec.Metadata{ Topology: new(spec.Specification), } topo := meta.Topology // Grafana admin username and password var grafanaUser string var grafanaPass string if group, ok := inventory.Groups["all"]; ok { for k, v := range group.Vars { switch k { case "ansible_user": meta.User = v im.user = v case "dm_version": meta.Version = v case "cluster_name": clusterName = v case "deploy_dir": topo.GlobalOptions.DeployDir = v // ansible convention directory for log topo.GlobalOptions.LogDir = filepath.Join(v, "log") case "grafana_admin_user": grafanaUser = strings.Trim(v, "\"") case "grafana_admin_password": grafanaPass = strings.Trim(v, "\"") default: fmt.Println("ignore unknown global var ", k, v) } } } for gname, group := range inventory.Groups { switch gname { case "dm_master_servers": for _, host := range group.Hosts { srv := &spec.MasterSpec{ Host: host.Vars["ansible_host"], SSHPort: ansible.GetHostPort(host, cfg), Imported: true, } runFileName := filepath.Join(host.Vars["deploy_dir"], "scripts", "run_dm-master.sh") data, err := im.fetchFile(ctx, srv.Host, srv.SSHPort, runFileName) if err != nil { return "", nil, err } deployDir, flags, err := parseRunScript(data) if err != nil { return "", nil, err } if deployDir == "" { return "", nil, errors.Errorf("unexpected run script %s, can get deploy dir", runFileName) } for k, v := range flags { switch k { case "master-addr": ar := strings.Split(v, ":") port, err := strconv.Atoi(ar[len(ar)-1]) if err != nil { return "", nil, errors.AddStack(err) 
} srv.Port = port // srv.PeerPort use default value case "L": // in tiup, must set in Config. setConfig(&srv.Config, "log-level", v) case "config": // Ignore the config file, nothing we care. case "log-file": srv.LogDir = filepath.Dir(getAbsPath(deployDir, v)) default: fmt.Printf("ignore unknown arg %s=%s in run script %s\n", k, v, runFileName) } } srv.DeployDir = instancDeployDir(spec.ComponentDMMaster, srv.Port, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) topo.Masters = append(topo.Masters, srv) } case "dm_worker_servers": for _, host := range group.Hosts { srv := &spec.WorkerSpec{ Host: host.Vars["ansible_host"], SSHPort: ansible.GetHostPort(host, cfg), DeployDir: firstNonEmpty(host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir), Imported: true, } runFileName := filepath.Join(host.Vars["deploy_dir"], "scripts", "run_dm-worker.sh") data, err := im.fetchFile(ctx, srv.Host, srv.SSHPort, runFileName) if err != nil { return "", nil, err } deployDir, flags, err := parseRunScript(data) if err != nil { return "", nil, err } if deployDir == "" { return "", nil, errors.Errorf("unexpected run script %s, can not get deploy directory", runFileName) } var configFileName string for k, v := range flags { switch k { case "worker-addr": ar := strings.Split(v, ":") port, err := strconv.Atoi(ar[len(ar)-1]) if err != nil { return "", nil, errors.AddStack(err) } srv.Port = port case "L": // in tiup, must set in Config. setConfig(&srv.Config, "log-level", v) case "config": configFileName = getAbsPath(deployDir, v) case "log-file": srv.LogDir = filepath.Dir(getAbsPath(deployDir, v)) case "relay-dir": // Safe to ignore this default: fmt.Printf("ignore unknown arg %s=%s in run script %s\n", k, v, runFileName) } } // Deploy dir MUST always keep the same and CAN NOT change. // dm-worker will save the data in the wording directory and there's no configuration // to specific the directory. // We will always set the wd as DeployDir. 
srv.DeployDir = deployDir err = im.handleWorkerConfig(ctx, srv, configFileName) if err != nil { return "", nil, err } topo.Workers = append(topo.Workers, srv) } case "dm_portal_servers": fmt.Println("ignore deprecated dm_portal_servers") case "prometheus_servers": for _, host := range group.Hosts { srv := &spec.PrometheusSpec{ Host: host.Vars["ansible_host"], SSHPort: ansible.GetHostPort(host, cfg), DeployDir: firstNonEmpty(host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir), Imported: true, } runFileName := filepath.Join(host.Vars["deploy_dir"], "scripts", "run_prometheus.sh") data, err := im.fetchFile(ctx, srv.Host, srv.SSHPort, runFileName) if err != nil { return "", nil, err } deployDir, flags, err := parseRunScript(data) if err != nil { return "", nil, err } if deployDir == "" { return "", nil, errors.Errorf("unexpected run script %s, can get deploy dir", runFileName) } for k, v := range flags { // just get data directory and port, ignore all other flags. switch k { case "storage.tsdb.path": srv.DataDir = getAbsPath(deployDir, v) case "web.listen-address": ar := strings.Split(v, ":") port, err := strconv.Atoi(ar[len(ar)-1]) if err != nil { return "", nil, errors.AddStack(err) } srv.Port = port case "STDOUT": srv.LogDir = filepath.Dir(getAbsPath(deployDir, v)) case "config.file", "web.external-url", "log.level", "storage.tsdb.retention": // ignore intent default: fmt.Printf("ignore unknown arg %s=%s in run script %s\n", k, v, runFileName) } } srv.DeployDir = instancDeployDir(spec.ComponentPrometheus, srv.Port, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) topo.Monitors = append(topo.Monitors, srv) } case "alertmanager_servers": for _, host := range group.Hosts { srv := &spec.AlertmanagerSpec{ Host: host.Vars["ansible_host"], SSHPort: ansible.GetHostPort(host, cfg), DeployDir: firstNonEmpty(host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir), Imported: true, } runFileName := filepath.Join(host.Vars["deploy_dir"], "scripts", "run_alertmanager.sh") 
data, err := im.fetchFile(ctx, srv.Host, srv.SSHPort, runFileName) if err != nil { return "", nil, err } deployDir, flags, err := parseRunScript(data) if err != nil { return "", nil, err } if deployDir == "" { return "", nil, errors.Errorf("unexpected run script %s, can get deploy dir", runFileName) } for k, v := range flags { switch k { case "storage.path": srv.DataDir = getAbsPath(deployDir, v) case "web.listen-address": ar := strings.Split(v, ":") port, err := strconv.Atoi(ar[len(ar)-1]) if err != nil { return "", nil, errors.AddStack(err) } srv.WebPort = port case "STDOUT": srv.LogDir = filepath.Dir(getAbsPath(deployDir, v)) case "config.file", "data.retention", "log.level": // ignore default: fmt.Printf("ignore unknown arg %s=%s in run script %s\n", k, v, runFileName) } } srv.DeployDir = instancDeployDir(spec.ComponentAlertmanager, srv.WebPort, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) topo.Alertmanagers = append(topo.Alertmanagers, srv) } case "grafana_servers": for _, host := range group.Hosts { // Do not fetch the truly used config file of Grafana, // get port directly from ansible ini files. 
port := 3000 if v, ok := host.Vars["grafana_port"]; ok { if iv, err := strconv.Atoi(v); err == nil { port = iv } } srv := &spec.GrafanaSpec{ Host: host.Vars["ansible_host"], SSHPort: ansible.GetHostPort(host, cfg), Port: port, Username: grafanaUser, Password: grafanaPass, Imported: true, } runFileName := filepath.Join(host.Vars["deploy_dir"], "scripts", "run_grafana.sh") data, err := im.fetchFile(ctx, srv.Host, srv.SSHPort, runFileName) if err != nil { return "", nil, err } _, _, err = parseRunScript(data) if err != nil { return "", nil, err } srv.DeployDir = instancDeployDir(spec.ComponentGrafana, srv.Port, host.Vars["deploy_dir"], topo.GlobalOptions.DeployDir) topo.Grafanas = append(topo.Grafanas, srv) } case "all", "ungrouped": // ignore intent default: fmt.Println("ignore unknown group ", gname) } } return } // parseRunScript parse the run script generate by dm-ansible // flags contains the flags of command line, adding a key "STDOUT" // if it redirect the stdout to a file. func parseRunScript(data []byte) (deployDir string, flags map[string]string, err error) { scanner := bufio.NewScanner(bytes.NewBuffer(data)) flags = make(map[string]string) for scanner.Scan() { line := scanner.Text() line = strings.TrimSpace(line) // parse "DEPLOY_DIR=/home/tidb/deploy" prefix := "DEPLOY_DIR=" if strings.HasPrefix(line, prefix) { deployDir = line[len(prefix):] deployDir = strings.TrimSpace(deployDir) continue } // parse such line: // exec > >(tee -i -a "/home/tidb/deploy/log/alertmanager.log") // // get the file path, as a "STDOUT" flag. if strings.Contains(line, "tee -i -a") { left := strings.Index(line, "\"") right := strings.LastIndex(line, "\"") if left < right { v := line[left+1 : right] flags["STDOUT"] = v } } // trim the ">> /path/to/file ..." 
part if index := strings.Index(line, ">>"); index != -1 { line = line[:index] } line = strings.TrimSuffix(line, "\\") line = strings.TrimSpace(line) // parse flag if strings.HasPrefix(line, "-") { seps := strings.Split(line, "=") if len(seps) != 2 { continue } k := strings.TrimLeft(seps[0], "-") v := strings.Trim(seps[1], "\"") flags[k] = v } } return } tiup-1.16.3/components/dm/ansible/import_test.go000066400000000000000000000165521505422223000216710ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ansible import ( "context" "os" "path/filepath" "strconv" "strings" "testing" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/dm/spec" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/stretchr/testify/require" ) // localExecutor only used for test. type localExecutor struct { host string executor.Local } type executorGetter struct { } var _ ExecutorGetter = &executorGetter{} // Get implements ExecutorGetter interface. func (g *executorGetter) Get(host string) ctxt.Executor { return &localExecutor{ host: host, } } // Transfer implements executor interface. // Replace the deploy directory as the local one in testdata, so we can fetch it. 
func (l *localExecutor) Transfer(ctx context.Context, src, target string, download bool, limit int, _ bool) error { mydeploy, err := filepath.Abs("./testdata/deploy_dir/" + l.host) if err != nil { return errors.AddStack(err) } src = strings.Replace(src, "/home/tidb/deploy", mydeploy, 1) return l.Local.Transfer(ctx, src, target, download, 0, false) } func TestParseRunScript(t *testing.T) { assert := require.New(t) // parse run_dm-master.sh data, err := os.ReadFile("./testdata/deploy_dir/172.19.0.101/scripts/run_dm-master.sh") assert.Nil(err) dir, flags, err := parseRunScript(data) assert.Nil(err) assert.Equal("/home/tidb/deploy", dir) expectedFlags := map[string]string{ "master-addr": ":8261", "L": "info", "config": "conf/dm-master.toml", "log-file": "/home/tidb/deploy/log/dm-master.log", } assert.Equal(expectedFlags, flags) // parse run_dm-worker.sh data, err = os.ReadFile("./testdata/deploy_dir/172.19.0.101/scripts/run_dm-worker.sh") assert.Nil(err) dir, flags, err = parseRunScript(data) assert.Nil(err) assert.Equal("/home/tidb/deploy", dir) expectedFlags = map[string]string{ "worker-addr": ":8262", "L": "info", "relay-dir": "/home/tidb/deploy/relay_log", "config": "conf/dm-worker.toml", "log-file": "/home/tidb/deploy/log/dm-worker.log", } assert.Equal(expectedFlags, flags) // parse run_prometheus.sh data, err = os.ReadFile("./testdata/deploy_dir/172.19.0.101/scripts/run_prometheus.sh") assert.Nil(err) dir, flags, err = parseRunScript(data) assert.Nil(err) assert.Equal("/home/tidb/deploy", dir) expectedFlags = map[string]string{ "STDOUT": "/home/tidb/deploy/log/prometheus.log", "config.file": "/home/tidb/deploy/conf/prometheus.yml", "web.listen-address": ":9090", "web.external-url": "http://172.19.0.101:9090/", "log.level": "info", "storage.tsdb.path": "/home/tidb/deploy/prometheus.data.metrics", "storage.tsdb.retention": "15d", } assert.Equal(expectedFlags, flags) // parse run_grafana.sh data, err = 
os.ReadFile("./testdata/deploy_dir/172.19.0.101/scripts/run_grafana.sh") assert.Nil(err) dir, flags, err = parseRunScript(data) assert.Nil(err) assert.Equal("/home/tidb/deploy", dir) expectedFlags = map[string]string{ "homepath": "/home/tidb/deploy/opt/grafana", "config": "/home/tidb/deploy/opt/grafana/conf/grafana.ini", } assert.Equal(expectedFlags, flags) // parse run_alertmanager.sh data, err = os.ReadFile("./testdata/deploy_dir/172.19.0.101/scripts/run_alertmanager.sh") assert.Nil(err) dir, flags, err = parseRunScript(data) assert.Nil(err) assert.Equal("/home/tidb/deploy", dir) expectedFlags = map[string]string{ "STDOUT": "/home/tidb/deploy/log/alertmanager.log", "config.file": "conf/alertmanager.yml", "storage.path": "/home/tidb/deploy/data.alertmanager", "data.retention": "120h", "log.level": "info", "web.listen-address": ":9093", } assert.Equal(expectedFlags, flags) } func TestImportFromAnsible(t *testing.T) { assert := require.New(t) dir := "./testdata/ansible" im, err := NewImporter(dir, "inventory.ini", executor.SSHTypeBuiltin, 0) assert.Nil(err) im.testExecutorGetter = &executorGetter{} clusterName, meta, err := im.ImportFromAnsibleDir(ctxt.New( context.Background(), 0, logprinter.NewLogger(""), )) assert.Nil(err, "verbose: %+v", err) assert.Equal("test-cluster", clusterName) assert.Equal("tidb", meta.User) assert.Equal("v1.0.6", meta.Version) // check GlobalOptions topo := meta.Topology assert.Equal("/home/tidb/deploy", topo.GlobalOptions.DeployDir) assert.Equal("/home/tidb/deploy/log", topo.GlobalOptions.LogDir) // check master assert.Len(topo.Masters, 1) master := topo.Masters[0] expectedMaster := &spec.MasterSpec{ Host: "172.19.0.101", SSHPort: 22, Port: 8261, DeployDir: "", LogDir: "/home/tidb/deploy/log", Config: map[string]any{"log-level": "info"}, Imported: true, } assert.Equal(expectedMaster, master) // check worker assert.Len(topo.Workers, 2) if topo.Workers[0].Host > topo.Workers[1].Host { topo.Workers[0], topo.Workers[1] = topo.Workers[1], 
topo.Workers[0] } expectedWorker := &spec.WorkerSpec{ Host: "172.19.0.101", SSHPort: 22, Port: 8262, DeployDir: "/home/tidb/deploy", LogDir: "/home/tidb/deploy/log", Config: map[string]any{"log-level": "info"}, Imported: true, } worker := topo.Workers[0] assert.Equal(expectedWorker, worker) expectedWorker.Host = "172.19.0.102" worker = topo.Workers[1] assert.Equal(expectedWorker, worker) // check Alertmanager assert.Len(topo.Alertmanagers, 1) aler := topo.Alertmanagers[0] expectedAlter := &spec.AlertmanagerSpec{ Host: "172.19.0.101", SSHPort: 22, WebPort: 9093, DeployDir: "", DataDir: "/home/tidb/deploy/data.alertmanager", LogDir: "/home/tidb/deploy/log", Imported: true, } assert.Equal(expectedAlter, aler) // Check Grafana assert.Len(topo.Grafanas, 1) grafana := topo.Grafanas[0] expectedGrafana := &spec.GrafanaSpec{ Host: "172.19.0.101", SSHPort: 22, DeployDir: "", Port: 3001, Username: "foo", Password: "bar", Imported: true, } assert.Equal(expectedGrafana, grafana) // Check Monitor(Prometheus) assert.Len(topo.Monitors, 1) monitor := topo.Monitors[0] expectedMonitor := &spec.PrometheusSpec{ Host: "172.19.0.101", SSHPort: 22, DeployDir: "", DataDir: "/home/tidb/deploy/prometheus.data.metrics", LogDir: "/home/tidb/deploy/log", Port: 9090, Imported: true, } assert.Equal(expectedMonitor, monitor) // Check sources assert.Len(im.sources, 2) s := im.sources[topo.Workers[0].Host+":"+strconv.Itoa(topo.Workers[0].Port)] assert.Equal("mysql-replica-01", s.SourceID) assert.Equal(DBConfig{ Host: "mysql1", Password: "password1", Port: 3306, User: "root", }, s.From) s = im.sources[topo.Workers[1].Host+":"+strconv.Itoa(topo.Workers[1].Port)] assert.Equal("mysql-replica-02", s.SourceID) assert.Equal(DBConfig{ Host: "mysql2", Port: 3306, User: "root", }, s.From) } 
tiup-1.16.3/components/dm/ansible/testdata/000077500000000000000000000000001505422223000205715ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/ansible/000077500000000000000000000000001505422223000222065ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/ansible/ansible.cfg000066400000000000000000000021331505422223000243030ustar00rootroot00000000000000[defaults] ## Customize this! inventory = inventory.ini transport = ssh # disable SSH key host checking host_key_checking = False # gathering = smart gathering = explicit fact_caching = jsonfile fact_caching_connection = fact_files retry_files_save_path = retry_files #remote_tmp = /tmp/ansible # for slow connections timeout = 10 gather_subset = network,hardware # if ssh port is not 22 #remote_port = 22 # for fun # cow_selection = random stdout_callback = skippy # log information about executions at the designated location log_path = log/ansible.log deprecation_warnings = False [ssh_connection] ## AWS key connection # ssh_args = -i aws.key -C -o ControlMaster=auto -o ControlPersist=60s ## Jumper host connection # ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o ProxyCommand="ssh user@host -p 22 nc %h %p" ## Default # ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s ## Use custom ssh config file # ssh_args = -F ssh_config #scp_if_ssh = True # close when using a jumper host, or have TTY errors # Ubuntu is OK, while CentOS may cause errors # pipelining = True tiup-1.16.3/components/dm/ansible/testdata/ansible/hosts.ini000066400000000000000000000001471505422223000240510ustar00rootroot00000000000000[servers] 172.19.0.101 172.19.0.102 172.19.0.103 172.19.0.104 172.19.0.105 [all:vars] username = tidb tiup-1.16.3/components/dm/ansible/testdata/ansible/inventory.ini000066400000000000000000000020751505422223000247500ustar00rootroot00000000000000## DM modules [dm_master_servers] dm_master ansible_host=172.19.0.101 [dm_worker_servers] dm-worker1 
ansible_host=172.19.0.101 server_id=101 source_id="mysql-replica-01" mysql_host=mysql1 mysql_user=root mysql_password='password1' mysql_port=3306 dm-worker2 ansible_host=172.19.0.102 server_id=102 source_id="mysql-replica-02" mysql_host=mysql2 mysql_user=root mysql_password='' mysql_port=3306 [dm_portal_servers] dm_portal ansible_host=172.19.0.101 ## Monitoring modules [prometheus_servers] prometheus ansible_host=172.19.0.101 [grafana_servers] ; grafana ansible_host=172.19.0.101 ; change to add specified port for test, ref: https://docs.pingcap.com/zh/tidb-data-migration/dev/deploy-a-dm-cluster-using-ansible#%E9%BB%98%E8%AE%A4%E6%9C%8D%E5%8A%A1%E7%AB%AF%E5%8F%A3 grafana ansible_host=172.19.0.101 grafana_port=3001 [alertmanager_servers] alertmanager ansible_host=172.19.0.101 ## Global variables [all:vars] cluster_name = test-cluster ansible_user = tidb dm_version = v1.0.6 deploy_dir = /home/tidb/deploy grafana_admin_user = "foo" grafana_admin_password = bar tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/000077500000000000000000000000001505422223000227235ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/000077500000000000000000000000001505422223000241225ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/conf/000077500000000000000000000000001505422223000250475ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/conf/dm-master.toml000066400000000000000000000002571505422223000276410ustar00rootroot00000000000000# Master Configuration. [[deploy]] source-id = "mysql-replica-01" dm-worker = "172.19.0.101:8262" [[deploy]] source-id = "mysql-replica-02" dm-worker = "172.19.0.102:8262" tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/conf/dm-worker.toml000066400000000000000000000005311505422223000276520ustar00rootroot00000000000000# Worker Configuration. 
server-id = 101 source-id = "mysql-replica-01" flavor = "mysql" enable-gtid = false #charset of DSN of source mysql/mariadb instance charset = "" meta-dir = "" [from] host = "mysql1" user = "root" password = "password1" port = 3306 #relay log purge strategy [purge] interval = 3600 expires = 0 remain-space = 15 tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/scripts/000077500000000000000000000000001505422223000256115ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/scripts/run_alertmanager.sh000077500000000000000000000007431505422223000315020ustar00rootroot00000000000000#!/bin/bash set -e ulimit -n 1000000 DEPLOY_DIR=/home/tidb/deploy cd "${DEPLOY_DIR}" || exit 1 # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! exec > >(tee -i -a "/home/tidb/deploy/log/alertmanager.log") exec 2>&1 exec bin/alertmanager \ --config.file="conf/alertmanager.yml" \ --storage.path="/home/tidb/deploy/data.alertmanager" \ --data.retention=120h \ --log.level="info" \ --web.listen-address=":9093" tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/scripts/run_dm-master.sh000077500000000000000000000007031505422223000307250ustar00rootroot00000000000000#!/bin/bash set -e ulimit -n 1000000 DEPLOY_DIR=/home/tidb/deploy cd "${DEPLOY_DIR}" || exit 1 # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! exec bin/dm-master \ --master-addr=":8261" \ -L="info" \ --config="conf/dm-master.toml" \ --log-file="/home/tidb/deploy/log/dm-master.log" >> "/home/tidb/deploy/log/dm-master-stdout.log" 2>> "/home/tidb/deploy/log/dm-master-stderr.log" tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/scripts/run_dm-worker.sh000077500000000000000000000007631505422223000307510ustar00rootroot00000000000000#!/bin/bash set -e ulimit -n 1000000 DEPLOY_DIR=/home/tidb/deploy cd "${DEPLOY_DIR}" || exit 1 # WARNING: This file was auto-generated. 
Do not edit! # All your edit might be overwritten! exec bin/dm-worker \ --worker-addr=":8262" \ -L="info" \ --relay-dir="/home/tidb/deploy/relay_log" \ --config="conf/dm-worker.toml" \ --log-file="/home/tidb/deploy/log/dm-worker.log" >> "/home/tidb/deploy/log/dm-worker-stdout.log" 2>> "/home/tidb/deploy/log/dm-worker-stderr.log" tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/scripts/run_grafana.sh000077500000000000000000000005661505422223000304420ustar00rootroot00000000000000#!/bin/bash set -e ulimit -n 1000000 # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR=/home/tidb/deploy cd "${DEPLOY_DIR}" || exit 1 LANG=en_US.UTF-8 \ exec opt/grafana/bin/grafana-server \ --homepath="/home/tidb/deploy/opt/grafana" \ --config="/home/tidb/deploy/opt/grafana/conf/grafana.ini" tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.101/scripts/run_prometheus.sh000077500000000000000000000011251505422223000312260ustar00rootroot00000000000000#!/bin/bash set -e ulimit -n 1000000 DEPLOY_DIR=/home/tidb/deploy cd "${DEPLOY_DIR}" || exit 1 # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
exec > >(tee -i -a "/home/tidb/deploy/log/prometheus.log") exec 2>&1 exec bin/prometheus \ --config.file="/home/tidb/deploy/conf/prometheus.yml" \ --web.listen-address=":9090" \ --web.external-url="http://172.19.0.101:9090/" \ --web.enable-admin-api \ --log.level="info" \ --storage.tsdb.path="/home/tidb/deploy/prometheus.data.metrics" \ --storage.tsdb.retention="15d" tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.102/000077500000000000000000000000001505422223000241235ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.102/conf/000077500000000000000000000000001505422223000250505ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.102/conf/dm-worker.toml000066400000000000000000000005201505422223000276510ustar00rootroot00000000000000# Worker Configuration. server-id = 102 source-id = "mysql-replica-02" flavor = "mysql" enable-gtid = false #charset of DSN of source mysql/mariadb instance charset = "" meta-dir = "" [from] host = "mysql2" user = "root" password = "" port = 3306 #relay log purge strategy [purge] interval = 3600 expires = 0 remain-space = 15 tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.102/scripts/000077500000000000000000000000001505422223000256125ustar00rootroot00000000000000tiup-1.16.3/components/dm/ansible/testdata/deploy_dir/172.19.0.102/scripts/run_dm-worker.sh000066400000000000000000000007631505422223000307470ustar00rootroot00000000000000#!/bin/bash set -e ulimit -n 1000000 DEPLOY_DIR=/home/tidb/deploy cd "${DEPLOY_DIR}" || exit 1 # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
exec bin/dm-worker \ --worker-addr=":8262" \ -L="info" \ --relay-dir="/home/tidb/deploy/relay_log" \ --config="conf/dm-worker.toml" \ --log-file="/home/tidb/deploy/log/dm-worker.log" >> "/home/tidb/deploy/log/dm-worker-stdout.log" 2>> "/home/tidb/deploy/log/dm-worker-stderr.log" tiup-1.16.3/components/dm/ansible/testdata/readme.txt000066400000000000000000000003421505422223000225660ustar00rootroot00000000000000This directory contains files when using dm-ansible deploying dm v1.0.6 only remain some files usable for test. ansible: contains the files in ansibile directory . deploy_dir: contains the files of deploy directory per host. tiup-1.16.3/components/dm/ansible/worker.go000066400000000000000000000070711505422223000206250ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ansible // Config Copy from https://github.com/pingcap/dm/blob/21a6e6e580f2e911edbe2400241bd95de2f7ef43/dm/worker/config.go#L93 // remove some unconcern parts. 
type Config struct { LogLevel string `toml:"log-level" json:"log-level"` LogFile string `toml:"log-file" json:"log-file"` LogFormat string `toml:"log-format" json:"log-format"` LogRotate string `toml:"log-rotate" json:"log-rotate"` WorkerAddr string `toml:"worker-addr" json:"worker-addr"` EnableGTID bool `toml:"enable-gtid" json:"enable-gtid"` AutoFixGTID bool `toml:"auto-fix-gtid" json:"auto-fix-gtid"` RelayDir string `toml:"relay-dir" json:"relay-dir"` MetaDir string `toml:"meta-dir" json:"meta-dir"` ServerID uint32 `toml:"server-id" json:"server-id"` Flavor string `toml:"flavor" json:"flavor"` Charset string `toml:"charset" json:"charset"` // relay synchronous starting point (if specified) RelayBinLogName string `toml:"relay-binlog-name" json:"relay-binlog-name"` RelayBinlogGTID string `toml:"relay-binlog-gtid" json:"relay-binlog-gtid"` SourceID string `toml:"source-id" json:"source-id"` From DBConfig `toml:"from" json:"from"` } // DBConfig of db. type DBConfig struct { Host string `toml:"host" json:"host" yaml:"host"` Port int `toml:"port" json:"port" yaml:"port"` User string `toml:"user" json:"user" yaml:"user"` Password string `toml:"password" json:"-" yaml:"password"` // omit it for privacy MaxAllowedPacket *int `toml:"max-allowed-packet" json:"max-allowed-packet" yaml:"max-allowed-packet"` } // SourceConfig is the configuration for Worker // ref: https://github.com/pingcap/dm/blob/3730a4e231091c5d65130d15a6c09a3b9fa3255e/dm/config/source_config.go#L51 type SourceConfig struct { EnableGTID bool `yaml:"enable-gtid" toml:"enable-gtid" json:"enable-gtid"` AutoFixGTID bool `yaml:"auto-fix-gtid" toml:"auto-fix-gtid" json:"auto-fix-gtid"` RelayDir string `yaml:"relay-dir" toml:"relay-dir" json:"relay-dir"` MetaDir string `yaml:"meta-dir" toml:"meta-dir" json:"meta-dir"` Flavor string `yaml:"flavor" toml:"flavor" json:"flavor"` Charset string `yaml:"charset" toml:"charset" json:"charset"` EnableRelay bool `yaml:"enable-relay" toml:"enable-relay" 
json:"enable-relay"` // relay synchronous starting point (if specified) RelayBinLogName string `yaml:"relay-binlog-name" toml:"relay-binlog-name" json:"relay-binlog-name"` RelayBinlogGTID string `yaml:"relay-binlog-gtid" toml:"relay-binlog-gtid" json:"relay-binlog-gtid"` SourceID string `yaml:"source-id" toml:"source-id" json:"source-id"` From DBConfig `yaml:"from" toml:"from" json:"from"` } // ToSource generate the SourceConfig for DM 2.0 func (c *Config) ToSource() (source *SourceConfig) { source = &SourceConfig{ EnableGTID: c.EnableGTID, AutoFixGTID: c.AutoFixGTID, RelayDir: c.RelayDir, MetaDir: c.MetaDir, Flavor: c.Flavor, // EnableRelay: RelayBinLogName: c.RelayBinLogName, RelayBinlogGTID: c.RelayBinlogGTID, SourceID: c.SourceID, From: c.From, } return } tiup-1.16.3/components/dm/command/000077500000000000000000000000001505422223000167615ustar00rootroot00000000000000tiup-1.16.3/components/dm/command/audit.go000066400000000000000000000032371505422223000204230ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/audit" cspec "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/spf13/cobra" ) var retainDays int func newAuditCmd() *cobra.Command { cmd := &cobra.Command{ Use: "audit [audit-id]", Short: "Show audit log of cluster operation", RunE: func(cmd *cobra.Command, args []string) error { switch len(args) { case 0: return audit.ShowAuditList(cspec.AuditDir()) case 1: return audit.ShowAuditLog(cspec.AuditDir(), args[0]) default: return cmd.Help() } }, } cmd.AddCommand(newAuditCleanupCmd()) return cmd } func newAuditCleanupCmd() *cobra.Command { cmd := &cobra.Command{ Use: "cleanup", Short: "cleanup dm audit logs", RunE: func(cmd *cobra.Command, args []string) error { if retainDays < 0 { return errors.Errorf("retain-days cannot be less than 0") } err := audit.DeleteAuditLog(cspec.AuditDir(), retainDays, skipConfirm, gOpt.DisplayMode) if err != nil { return err } return nil }, } cmd.Flags().IntVar(&retainDays, "retain-days", 60, "Number of days to keep audit logs for deletion") return cmd } tiup-1.16.3/components/dm/command/deploy.go000066400000000000000000000061611505422223000206100ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "context" "path" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/manager" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newDeployCmd() *cobra.Command { opt := manager.DeployOptions{ IdentityFile: path.Join(utils.UserHome(), ".ssh", "id_rsa"), } cmd := &cobra.Command{ Use: "deploy ", Short: "Deploy a DM cluster for production", Long: "Deploy a DM cluster for production. SSH connection will be used to deploy files, as well as creating system users for running the service.", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { shouldContinue, err := tui.CheckCommandArgsAndMayPrintHelp(cmd, args, 3) if err != nil { return err } if !shouldContinue { return nil } clusterName := args[0] version, err := utils.FmtVer(args[1]) if err != nil { return err } topoFile := args[2] if err := supportVersion(version); err != nil { return err } return cm.Deploy(clusterName, version, topoFile, opt, postDeployHook, skipConfirm, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 2: return nil, cobra.ShellCompDirectiveDefault default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVarP(&opt.User, "user", "u", utils.CurrentUser(), "The user name to login via SSH. The user must has root (or sudo) privilege.") cmd.Flags().StringVarP(&opt.IdentityFile, "identity_file", "i", opt.IdentityFile, "The path of the SSH identity file. If specified, public key authentication will be used.") cmd.Flags().BoolVarP(&opt.UsePassword, "password", "p", false, "Use password of target hosts. 
If specified, password authentication will be used.") return cmd } func supportVersion(vs string) error { if !tidbver.DMSupportDeploy(vs) { return errors.Errorf("Only support version not less than v2.0") } return nil } func postDeployHook(builder *task.Builder, topo spec.Topology, gOpt operator.Options) { enableTask := task.NewBuilder(builder.Logger).Func("Setting service auto start on boot", func(ctx context.Context) error { return operator.Enable(ctx, topo, operator.Options{}, true) }).BuildAsStep("Enable service").SetHidden(true) builder.Parallel(false, enableTask) } tiup-1.16.3/components/dm/command/deploy_test.go000066400000000000000000000006411505422223000216440ustar00rootroot00000000000000package command import ( "testing" "github.com/stretchr/testify/require" ) func TestSupportVersion(t *testing.T) { assert := require.New(t) tests := map[string]bool{ // version to support or not "v2.0.0": true, "v6.0.0": true, "v1.0.1": false, "v1.1.1": false, } for v, support := range tests { err := supportVersion(v) if support { assert.Nil(err) } else { assert.NotNil(err) } } } tiup-1.16.3/components/dm/command/destroy.go000066400000000000000000000041541505422223000210050ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/components/dm/spec" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/set" "github.com/spf13/cobra" ) func newDestroyCmd() *cobra.Command { destroyOpt := operator.Options{} cmd := &cobra.Command{ Use: "destroy ", Short: "Destroy a specified DM cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] // Validate the retained roles to prevent unexpected deleting data if len(destroyOpt.RetainDataRoles) > 0 { validRoles := set.NewStringSet(spec.AllDMComponentNames()...) for _, role := range destroyOpt.RetainDataRoles { if !validRoles.Exist(role) { return perrs.Errorf("role name `%s` invalid", role) } } } return cm.DestroyCluster(clusterName, gOpt, destroyOpt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringArrayVar(&destroyOpt.RetainDataNodes, "retain-node-data", nil, "Specify the nodes or hosts whose data will be retained") cmd.Flags().StringArrayVar(&destroyOpt.RetainDataRoles, "retain-role-data", nil, "Specify the roles whose data will be retained") cmd.Flags().BoolVar(&destroyOpt.Force, "force", false, "Force will ignore remote error while destroy the cluster") return cmd } tiup-1.16.3/components/dm/command/disable.go000066400000000000000000000026771505422223000207270ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newDisableCmd() *cobra.Command { cmd := &cobra.Command{ Use: "disable ", Short: "Disable automatic enabling of DM clusters at boot", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return nil } clusterName := args[0] return cm.EnableCluster(clusterName, gOpt, false) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only disable specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only disable specified nodes") return cmd } tiup-1.16.3/components/dm/command/display.go000066400000000000000000000047571505422223000207720ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "errors" "fmt" "strings" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/spf13/cobra" ) func newDisplayCmd() *cobra.Command { var ( dopt manager.DisplayOption showVersionOnly bool statusTimeout uint64 ) cmd := &cobra.Command{ Use: "display ", Short: "Display information of a DM cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } gOpt.APITimeout = statusTimeout dopt.ClusterName = args[0] if showVersionOnly { metadata, err := spec.ClusterMetadata(dopt.ClusterName) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } fmt.Println(metadata.Version) return nil } return cm.Display(dopt, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only display specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only display specified nodes") cmd.Flags().BoolVar(&showVersionOnly, "version", false, "Only display DM cluster version") cmd.Flags().BoolVar(&dopt.ShowUptime, "uptime", false, "Display DM with uptime") cmd.Flags().Uint64Var(&statusTimeout, "status-timeout", 10, "Timeout in seconds when getting node status") return cmd } func shellCompGetClusterName(cm *manager.Manager, toComplete string) ([]string, cobra.ShellCompDirective) { var result []string clusters, _ := cm.GetClusterList() for _, c := range clusters { if strings.HasPrefix(c.Name, toComplete) { result = append(result, c.Name) } } return result, cobra.ShellCompDirectiveNoFileComp } 
tiup-1.16.3/components/dm/command/edit_config.go000066400000000000000000000030141505422223000215600ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/spf13/cobra" ) func newEditConfigCmd() *cobra.Command { opt := manager.EditConfigOptions{} cmd := &cobra.Command{ Use: "edit-config ", Short: "Edit DM cluster config", Long: "Edit DM cluster config. Will use editor from environment variable `EDITOR`, default use vi", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.EditConfig(clusterName, opt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVarP(&opt.NewTopoFile, "topology-file", "", opt.NewTopoFile, "Use provided topology file to substitute the original one instead of editing it.") return cmd } tiup-1.16.3/components/dm/command/enable.go000066400000000000000000000026621505422223000205440ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newEnableCmd() *cobra.Command { cmd := &cobra.Command{ Use: "enable ", Short: "Enable a DM cluster automatically at boot", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return nil } clusterName := args[0] return cm.EnableCluster(clusterName, gOpt, true) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only enable specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only enable specified nodes") return cmd } tiup-1.16.3/components/dm/command/exec.go000066400000000000000000000032041505422223000202330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/spf13/cobra" ) func newExecCmd() *cobra.Command { opt := manager.ExecOptions{} cmd := &cobra.Command{ Use: "exec ", Short: "Run shell command on host in the dm cluster", Hidden: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.Exec(clusterName, opt, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVar(&opt.Command, "command", "ls", "the command run on cluster host") cmd.Flags().BoolVar(&opt.Sudo, "sudo", false, "use root permissions (default false)") cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only exec on host with specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only exec on host with specified nodes") return cmd } tiup-1.16.3/components/dm/command/import.go000066400000000000000000000064361505422223000206330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "context" "fmt" "os" "github.com/fatih/color" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/dm/ansible" cansible "github.com/pingcap/tiup/pkg/cluster/ansible" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/manager" "github.com/pingcap/tiup/pkg/tui" tiuputils "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" "gopkg.in/yaml.v3" ) func newImportCmd() *cobra.Command { var ansibleDir string var inventoryFileName string var rename string var clusterVersion string cmd := &cobra.Command{ Use: "import", Short: "Import an exist DM 1.0 cluster from dm-ansible and re-deploy 2.0 version", RunE: func(cmd *cobra.Command, args []string) error { if err := supportVersion(clusterVersion); err != nil { return err } importer, err := ansible.NewImporter(ansibleDir, inventoryFileName, gOpt.SSHType, gOpt.SSHTimeout) if err != nil { return err } ctx := ctxt.New(context.Background(), 0, log) clusterName, meta, err := importer.ImportFromAnsibleDir(ctx) if err != nil { return err } if rename != "" { clusterName = rename } err = importer.ScpSourceToMaster(ctx, meta.Topology) if err != nil { return err } data, err := yaml.Marshal(meta.Topology) if err != nil { return errors.AddStack(err) } f, err := os.CreateTemp("", "tiup-*") if err != nil { return errors.AddStack(err) } _, err = f.Write(data) if err != nil { return errors.AddStack(err) } fmt.Println(color.HiYellowString("Will use the following topology to deploy a DM cluster: ")) fmt.Println(string(data)) if !skipConfirm { err = tui.PromptForConfirmOrAbortError( "%s", color.HiYellowString("Using the Topology to deploy DM %s cluster %s, Please Stop the DM cluster from ansible side first.\nDo you want to continue? 
[y/N]: ", clusterVersion, clusterName, )) if err != nil { return err } } err = cm.Deploy( clusterName, clusterVersion, f.Name(), manager.DeployOptions{ IdentityFile: cansible.SSHKeyPath(), User: tiuputils.CurrentUser(), }, nil, skipConfirm, gOpt, ) if err != nil { return err } return nil }, } cmd.Flags().StringVarP(&ansibleDir, "dir", "d", "./", "The path to DM-Ansible directory") cmd.Flags().StringVar(&inventoryFileName, "inventory", cansible.AnsibleInventoryFile, "The name of inventory file") cmd.Flags().StringVarP(&rename, "rename", "r", "", "Rename the imported cluster to `NAME`") cmd.Flags().StringVarP(&clusterVersion, "cluster-version", "v", "", "cluster version of DM to deploy (required)") err := cmd.MarkFlagRequired("cluster-version") if err != nil { // if no this flag panic(err) } return cmd } tiup-1.16.3/components/dm/command/list.go000066400000000000000000000014151505422223000202640ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "List all clusters", RunE: func(cmd *cobra.Command, args []string) error { return cm.ListCluster() }, } return cmd } tiup-1.16.3/components/dm/command/meta.go000066400000000000000000000035051505422223000202410ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "fmt" "time" "github.com/spf13/cobra" ) func newMetaCmd() *cobra.Command { cmd := &cobra.Command{ Use: "meta", Short: "backup/restore meta information", } var filePath string var metaBackupCmd = &cobra.Command{ Use: "backup ", Short: "backup topology and other information of a cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return fmt.Errorf("please input cluster name") } if filePath == "" { filePath = "tiup-cluster_" + args[0] + "_metabackup_" + time.Now().Format(time.RFC3339) + ".tar.gz" } err := cm.BackupClusterMeta(args[0], filePath) if err == nil { log.Infof("successfully backup meta of cluster %s on %s", args[0], filePath) } return err }, } metaBackupCmd.Flags().StringVar(&filePath, "file", "", "filepath of output tarball") var metaRestoreCmd = &cobra.Command{ Use: "restore ", Short: "restore topology and other information of a cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return fmt.Errorf("please input cluster name and path to the backup file") } return cm.RestoreClusterMeta(args[0], args[1], skipConfirm) }, } cmd.AddCommand(metaBackupCmd) cmd.AddCommand(metaRestoreCmd) return cmd } tiup-1.16.3/components/dm/command/patch.go000066400000000000000000000031601505422223000204070ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( perrs "github.com/pingcap/errors" "github.com/spf13/cobra" ) func newPatchCmd() *cobra.Command { var ( overwrite bool offlineMode bool ) cmd := &cobra.Command{ Use: "patch ", Short: "Replace the remote package with a specified package and restart the service", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } if len(gOpt.Nodes) == 0 && len(gOpt.Roles) == 0 { return perrs.New("the flag -R or -N must be specified at least one") } clusterName := args[0] return cm.Patch(clusterName, args[1], gOpt, overwrite, offlineMode, skipConfirm) }, } cmd.Flags().BoolVar(&overwrite, "overwrite", false, "Use this package in the future scale-out operations") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Specify the nodes") cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Specify the roles") cmd.Flags().BoolVarP(&offlineMode, "offline", "", false, "Patch a stopped cluster") return cmd } tiup-1.16.3/components/dm/command/prune.go000066400000000000000000000063201505422223000204420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "sync" "time" "github.com/pingcap/tiup/components/dm/spec" "github.com/pingcap/tiup/pkg/cluster/api" operator "github.com/pingcap/tiup/pkg/cluster/operation" tidbspec "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/spf13/cobra" "go.uber.org/zap" ) func newPruneCmd() *cobra.Command { cmd := &cobra.Command{ Use: "prune ", Short: "Clear etcd info ", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] metadata := new(spec.Metadata) err := dmspec.Metadata(clusterName, metadata) if err != nil { return err } return clearOutDatedEtcdInfo(clusterName, metadata, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } return cmd } func clearOutDatedEtcdInfo(clusterName string, metadata *spec.Metadata, opt operator.Options) error { topo := metadata.Topology existedMasters := make(map[string]struct{}) existedWorkers := make(map[string]struct{}) mastersToDelete := make([]string, 0) workersToDelete := make([]string, 0) for _, masterSpec := range topo.Masters { existedMasters[masterSpec.Name] = struct{}{} } for _, workerSpec := range topo.Workers { existedWorkers[workerSpec.Name] = struct{}{} } tlsCfg, err := topo.TLSConfig(dmspec.Path(clusterName, tidbspec.TLSCertKeyDir)) if err != nil { return err } dmMasterClient := api.NewDMMasterClient(topo.GetMasterListWithManageHost(), 10*time.Second, tlsCfg) registeredMasters, registeredWorkers, err := 
dmMasterClient.GetRegisteredMembers() if err != nil { return err } for _, master := range registeredMasters { if _, ok := existedMasters[master]; !ok { mastersToDelete = append(mastersToDelete, master) } } for _, worker := range registeredWorkers { if _, ok := existedWorkers[worker]; !ok { workersToDelete = append(workersToDelete, worker) } } zap.L().Info("Outdated components needed to clear etcd info", zap.Strings("masters", mastersToDelete), zap.Strings("workers", workersToDelete)) errCh := make(chan error, len(existedMasters)+len(existedWorkers)) var wg sync.WaitGroup for _, master := range mastersToDelete { wg.Add(1) go func() { errCh <- dmMasterClient.OfflineMaster(master, nil) wg.Done() }() } for _, worker := range workersToDelete { wg.Add(1) go func() { errCh <- dmMasterClient.OfflineWorker(worker, nil) wg.Done() }() } wg.Wait() if len(errCh) == 0 { return nil } // return any one error return <-errCh } tiup-1.16.3/components/dm/command/reload.go000066400000000000000000000036751505422223000205710ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "slices" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/components/dm/spec" "github.com/spf13/cobra" ) func newReloadCmd() *cobra.Command { var skipRestart bool cmd := &cobra.Command{ Use: "reload ", Short: "Reload a DM cluster's config and restart if needed", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } if err := validRoles(gOpt.Roles); err != nil { return err } clusterName := args[0] return cm.Reload(clusterName, gOpt, skipRestart, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only reload specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only reload specified nodes") cmd.Flags().BoolVar(&skipRestart, "skip-restart", false, "Only refresh configuration to remote and do not restart services") return cmd } func validRoles(roles []string) error { for _, r := range roles { match := slices.Contains(spec.AllDMComponentNames(), r) if !match { return perrs.Errorf("not valid role: %s, should be one of: %v", r, spec.AllDMComponentNames()) } } return nil } tiup-1.16.3/components/dm/command/replay.go000066400000000000000000000032571505422223000206130ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "fmt" "path" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/audit" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/tui" "github.com/spf13/cobra" ) func newReplayCmd() *cobra.Command { cmd := &cobra.Command{ Use: "replay ", Short: "Replay previous operation and skip successed steps", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } file := path.Join(spec.AuditDir(), args[0]) if !checkpoint.HasCheckPoint() { if err := checkpoint.SetCheckPoint(file); err != nil { return errors.Annotate(err, "set checkpoint failed") } } args, err := audit.CommandArgs(file) if err != nil { return errors.Annotate(err, "read audit log failed") } if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("Will replay the command `tiup dm %s`\nDo you want to continue? [y/N]: ", strings.Join(args[1:], " ")), ); err != nil { return err } } rootCmd.SetArgs(args[1:]) return rootCmd.Execute() }, } return cmd } tiup-1.16.3/components/dm/command/restart.go000066400000000000000000000025421505422223000207770ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/spf13/cobra" ) func newRestartCmd() *cobra.Command { cmd := &cobra.Command{ Use: "restart ", Short: "Restart a DM cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.RestartCluster(clusterName, gOpt, skipConfirm) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only restart specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only restart specified nodes") return cmd } tiup-1.16.3/components/dm/command/root.go000066400000000000000000000216721505422223000203030ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "encoding/json" "fmt" "os" "path" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/components/dm/spec" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/cluster/manager" operator "github.com/pingcap/tiup/pkg/cluster/operation" cspec "github.com/pingcap/tiup/pkg/cluster/spec" tiupmeta "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/logger" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/version" "github.com/spf13/cobra" "go.uber.org/zap" ) var ( // nolint errNS = errorx.NewNamespace("cmd") rootCmd *cobra.Command gOpt operator.Options skipConfirm bool log = logprinter.NewLogger("") // init default logger ) var dmspec *cspec.SpecManager var cm *manager.Manager func init() { logger.InitGlobalLogger() tui.AddColorFunctionsForCobra() cobra.EnableCommandSorting = false nativeEnvVar := strings.ToLower(os.Getenv(localdata.EnvNameNativeSSHClient)) if nativeEnvVar == "true" || nativeEnvVar == "1" || nativeEnvVar == "enable" { gOpt.NativeSSH = true } rootCmd = &cobra.Command{ Use: tui.OsArgs0(), Short: "(EXPERIMENTAL) Deploy a DM cluster", Long: `EXPERIMENTAL: This is an experimental feature, things may or may not work, please backup your data before process.`, SilenceUsage: true, SilenceErrors: true, Version: version.NewTiUPVersion().String(), PersistentPreRunE: func(cmd *cobra.Command, args []string) error { // populate logger log.SetDisplayModeFromString(gOpt.DisplayMode) var err error var env *tiupmeta.Environment if err = cspec.Initialize("dm"); err != nil { return err } dmspec = spec.GetSpecManager() logger.EnableAuditLog(cspec.AuditDir()) cm = manager.NewManager("dm", dmspec, log) // Running in other 
OS/ARCH Should be fine we only download manifest file. env, err = tiupmeta.InitEnv(repository.Options{ GOOS: "linux", GOARCH: "amd64", }, repository.MirrorOptions{}) if err != nil { return err } tiupmeta.SetGlobalEnv(env) if gOpt.NativeSSH { gOpt.SSHType = executor.SSHTypeSystem zap.L().Info("System ssh client will be used", zap.String(localdata.EnvNameNativeSSHClient, os.Getenv(localdata.EnvNameNativeSSHClient))) fmt.Println("The --native-ssh flag has been deprecated, please use --ssh=system") } err = proxy.MaybeStartProxy( gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, gOpt.SSHProxyUsePassword, gOpt.SSHProxyIdentity, log, ) if err != nil { return perrs.Annotate(err, "start http-proxy") } return nil }, PersistentPostRunE: func(cmd *cobra.Command, args []string) error { proxy.MaybeStopProxy() return tiupmeta.GlobalEnv().V1Repository().Mirror().Close() }, } tui.BeautifyCobraUsageAndHelp(rootCmd) rootCmd.PersistentFlags().Uint64Var(&gOpt.SSHTimeout, "ssh-timeout", 5, "Timeout in seconds to connect host via SSH, ignored for operations that don't need an SSH connection.") rootCmd.PersistentFlags().Uint64Var(&gOpt.OptTimeout, "wait-timeout", 120, "Timeout in seconds to wait for an operation to complete, ignored for operations that don't fit.") rootCmd.PersistentFlags().BoolVarP(&skipConfirm, "yes", "y", false, "Skip all confirmations and assumes 'yes'") rootCmd.PersistentFlags().BoolVar(&gOpt.NativeSSH, "native-ssh", gOpt.NativeSSH, "Use the SSH client installed on local system instead of the built-in one.") rootCmd.PersistentFlags().StringVar((*string)(&gOpt.SSHType), "ssh", "", "The executor type: 'builtin', 'system', 'none'") rootCmd.PersistentFlags().IntVarP(&gOpt.Concurrency, "concurrency", "c", 5, "max number of parallel tasks allowed") rootCmd.PersistentFlags().StringVar(&gOpt.DisplayMode, "format", "default", "(EXPERIMENTAL) The format of output, available values are [default, json]") rootCmd.PersistentFlags().StringVar(&gOpt.SSHProxyHost, 
"ssh-proxy-host", "", "The SSH proxy host used to connect to remote host.") rootCmd.PersistentFlags().StringVar(&gOpt.SSHProxyUser, "ssh-proxy-user", utils.CurrentUser(), "The user name used to login the proxy host.") rootCmd.PersistentFlags().IntVar(&gOpt.SSHProxyPort, "ssh-proxy-port", 22, "The port used to login the proxy host.") rootCmd.PersistentFlags().StringVar(&gOpt.SSHProxyIdentity, "ssh-proxy-identity-file", path.Join(utils.UserHome(), ".ssh", "id_rsa"), "The identity file used to login the proxy host.") rootCmd.PersistentFlags().BoolVar(&gOpt.SSHProxyUsePassword, "ssh-proxy-use-password", false, "Use password to login the proxy host.") rootCmd.PersistentFlags().Uint64Var(&gOpt.SSHProxyTimeout, "ssh-proxy-timeout", 5, "Timeout in seconds to connect the proxy host via SSH, ignored for operations that don't need an SSH connection.") _ = rootCmd.PersistentFlags().MarkHidden("native-ssh") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-host") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-user") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-port") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-identity-file") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-use-password") _ = rootCmd.PersistentFlags().MarkHidden("ssh-proxy-timeout") rootCmd.AddCommand( newDeployCmd(), newStartCmd(), newStopCmd(), newRestartCmd(), newListCmd(), newDestroyCmd(), newAuditCmd(), newExecCmd(), newEditConfigCmd(), newDisplayCmd(), newPruneCmd(), newReloadCmd(), newUpgradeCmd(), newPatchCmd(), newScaleOutCmd(), newScaleInCmd(), newImportCmd(), newEnableCmd(), newDisableCmd(), newReplayCmd(), newTemplateCmd(), newMetaCmd(), newRotateSSHCmd(), ) } func printErrorMessageForNormalError(err error) { _, _ = tui.ColorErrorMsg.Fprintf(os.Stderr, "\nError: %s\n", err.Error()) } func printErrorMessageForErrorX(err *errorx.Error) { msg := "" ident := 0 causeErrX := err for causeErrX != nil { if ident > 0 { msg += strings.Repeat(" ", ident) + "caused by: " } currentErrMsg 
:= causeErrX.Message() if len(currentErrMsg) > 0 { if ident == 0 { // Print error code only for top level error msg += fmt.Sprintf("%s (%s)\n", currentErrMsg, causeErrX.Type().FullName()) } else { msg += fmt.Sprintf("%s\n", currentErrMsg) } ident++ } cause := causeErrX.Cause() if c := errorx.Cast(cause); c != nil { causeErrX = c } else { if cause != nil { if ident > 0 { // Out most error may have empty message. In this case we treat it as a transparent error. // Thus `ident == 0` can be possible. msg += strings.Repeat(" ", ident) + "caused by: " } msg += fmt.Sprintf("%s\n", cause.Error()) } break } } _, _ = tui.ColorErrorMsg.Fprintf(os.Stderr, "\nError: %s", msg) } func extractSuggestionFromErrorX(err *errorx.Error) string { cause := err for cause != nil { v, ok := cause.Property(utils.ErrPropSuggestion) if ok { if s, ok := v.(string); ok { return s } } cause = errorx.Cast(cause.Cause()) } return "" } // Execute executes the root command func Execute() { zap.L().Info("Execute command", zap.String("command", tui.OsArgs())) zap.L().Debug("Environment variables", zap.Strings("env", os.Environ())) code := 0 err := rootCmd.Execute() if err != nil { code = 1 } zap.L().Info("Execute command finished", zap.Int("code", code), zap.Error(err)) if err != nil { switch strings.ToLower(gOpt.DisplayMode) { case "json": obj := struct { Err string `json:"error"` }{ Err: err.Error(), } data, err := json.Marshal(obj) if err != nil { fmt.Printf("{\"error\": \"%s\"}", err) break } fmt.Fprintln(os.Stderr, string(data)) default: if errx := errorx.Cast(err); errx != nil { printErrorMessageForErrorX(errx) } else { printErrorMessageForNormalError(err) } if !errorx.HasTrait(err, utils.ErrTraitPreCheck) { logger.OutputDebugLog("tiup-dm") } if errx := errorx.Cast(err); errx != nil { if suggestion := extractSuggestionFromErrorX(errx); len(suggestion) > 0 { _, _ = fmt.Fprintf(os.Stderr, "\n%s\n", suggestion) } } } } err = logger.OutputAuditLogIfEnabled() if err != nil { zap.L().Warn("Write audit 
log file failed", zap.Error(err)) code = 1 } color.Unset() if code != 0 { os.Exit(code) } } tiup-1.16.3/components/dm/command/rotate_ssh.go000066400000000000000000000016341505422223000214670ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newRotateSSHCmd() *cobra.Command { cmd := &cobra.Command{ Use: "rotatessh ", Short: "rotate ssh keys on all nodes", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.RotateSSH(clusterName, gOpt, skipConfirm) }, } return cmd } tiup-1.16.3/components/dm/command/scale_in.go000066400000000000000000000133361505422223000210730ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "context" "crypto/tls" "fmt" "time" "github.com/pingcap/errors" dm "github.com/pingcap/tiup/components/dm/spec" dmtask "github.com/pingcap/tiup/components/dm/task" "github.com/pingcap/tiup/pkg/cluster/api" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newScaleInCmd() *cobra.Command { cmd := &cobra.Command{ Use: "scale-in ", Short: "Scale in a DM cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] scale := func(b *task.Builder, imetadata spec.Metadata, tlsCfg *tls.Config) { metadata := imetadata.(*dm.Metadata) b.Func( fmt.Sprintf("ScaleInCluster: options=%+v", gOpt), func(ctx context.Context) error { return ScaleInDMCluster(ctx, metadata.Topology, gOpt, tlsCfg) }, ).Serial(dmtask.NewUpdateDMMeta(clusterName, metadata, gOpt.Nodes)) } return cm.ScaleIn(clusterName, skipConfirm, gOpt, scale) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Specify the nodes (required)") cmd.Flags().BoolVar(&gOpt.Force, "force", false, "Force just try stop and destroy instance before removing the instance from topo") _ = cmd.MarkFlagRequired("node") return cmd } // ScaleInDMCluster scale in dm cluster. 
func ScaleInDMCluster( ctx context.Context, topo *dm.Specification, options operator.Options, tlsCfg *tls.Config, ) error { // instances by uuid instances := map[string]dm.Instance{} instCount := map[string]int{} // make sure all nodeIds exists in topology for _, component := range topo.ComponentsByStartOrder() { for _, instance := range component.Instances() { instances[instance.ID()] = instance instCount[instance.GetManageHost()]++ } } // Clean components deletedDiff := map[string][]dm.Instance{} deletedNodes := set.NewStringSet(options.Nodes...) for nodeID := range deletedNodes { inst, found := instances[nodeID] if !found { return errors.Errorf("cannot find node id '%s' in topology", nodeID) } deletedDiff[inst.ComponentName()] = append(deletedDiff[inst.ComponentName()], inst) } // Cannot delete all DM DMMaster servers if len(deletedDiff[dm.ComponentDMMaster]) == len(topo.Masters) { return errors.New("cannot delete all dm-master servers") } if options.Force { for _, component := range topo.ComponentsByStartOrder() { for _, instance := range component.Instances() { if !deletedNodes.Exist(instance.ID()) { continue } instCount[instance.GetManageHost()]-- if err := operator.StopAndDestroyInstance(ctx, topo, instance, options, false, instCount[instance.GetManageHost()] == 0, tlsCfg); err != nil { log.Warnf("failed to stop/destroy %s: %v", component.Name(), err) } } } return nil } // At least a DMMaster server exists var dmMasterClient *api.DMMasterClient var dmMasterEndpoint []string for _, instance := range (&dm.DMMasterComponent{Topology: topo}).Instances() { if !deletedNodes.Exist(instance.ID()) { dmMasterEndpoint = append(dmMasterEndpoint, utils.JoinHostPort(instance.GetManageHost(), instance.GetPort())) } } if len(dmMasterEndpoint) == 0 { return errors.New("cannot find available dm-master instance") } dmMasterClient = api.NewDMMasterClient(dmMasterEndpoint, 10*time.Second, tlsCfg) noAgentHosts := set.NewStringSet() topo.IterInstance(func(inst dm.Instance) { if 
inst.IgnoreMonitorAgent() { noAgentHosts.Insert(inst.GetManageHost()) } }) // Delete member from cluster for _, component := range topo.ComponentsByStartOrder() { for _, instance := range component.Instances() { if !deletedNodes.Exist(instance.ID()) { continue } if err := operator.StopComponent( ctx, topo, []dm.Instance{instance}, noAgentHosts, options, false, false, /* evictLeader */ &tls.Config{}, /* not used as evictLeader is false */ ); err != nil { return errors.Annotatef(err, "failed to stop %s", component.Name()) } switch component.Name() { case dm.ComponentDMMaster: name := instance.(*dm.MasterInstance).Name err := dmMasterClient.OfflineMaster(name, nil) if err != nil { return err } case dm.ComponentDMWorker: name := instance.(*dm.WorkerInstance).Name err := dmMasterClient.OfflineWorker(name, nil) if err != nil { return err } } if err := operator.DestroyComponent(ctx, []dm.Instance{instance}, topo, options); err != nil { return errors.Annotatef(err, "failed to destroy %s", component.Name()) } instCount[instance.GetManageHost()]-- if instCount[instance.GetManageHost()] == 0 { if err := operator.DeletePublicKey(ctx, instance.GetManageHost()); err != nil { return errors.Annotatef(err, "failed to delete public key") } } } } return nil } tiup-1.16.3/components/dm/command/scale_out.go000066400000000000000000000044221505422223000212700ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "path/filepath" "github.com/pingcap/tiup/pkg/cluster/manager" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func newScaleOutCmd() *cobra.Command { opt := manager.DeployOptions{ IdentityFile: filepath.Join(utils.UserHome(), ".ssh", "id_rsa"), } cmd := &cobra.Command{ Use: "scale-out ", Short: "Scale out a DM cluster", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } clusterName := args[0] topoFile := args[1] return cm.ScaleOut(clusterName, topoFile, postScaleOutHook, nil, opt, skipConfirm, gOpt) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) case 1: return nil, cobra.ShellCompDirectiveDefault default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringVarP(&opt.User, "user", "u", utils.CurrentUser(), "The user name to login via SSH. The user must has root (or sudo) privilege.") cmd.Flags().StringVarP(&opt.IdentityFile, "identity_file", "i", opt.IdentityFile, "The path of the SSH identity file. If specified, public key authentication will be used.") cmd.Flags().BoolVarP(&opt.UsePassword, "password", "p", false, "Use password of target hosts. If specified, password authentication will be used.") return cmd } func postScaleOutHook(builder *task.Builder, newPart spec.Topology, gOpt operator.Options) { postDeployHook(builder, newPart, gOpt) } tiup-1.16.3/components/dm/command/start.go000066400000000000000000000025201505422223000204440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "github.com/spf13/cobra" ) func newStartCmd() *cobra.Command { cmd := &cobra.Command{ Use: "start ", Short: "Start a DM cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.StartCluster(clusterName, gOpt, false) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only start specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only start specified nodes") return cmd } tiup-1.16.3/components/dm/command/stop.go000066400000000000000000000025271505422223000203030ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "github.com/spf13/cobra" ) func newStopCmd() *cobra.Command { cmd := &cobra.Command{ Use: "stop ", Short: "Stop a DM cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } clusterName := args[0] return cm.StopCluster(clusterName, gOpt, skipConfirm, false) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().StringSliceVarP(&gOpt.Roles, "role", "R", nil, "Only stop specified roles") cmd.Flags().StringSliceVarP(&gOpt.Nodes, "node", "N", nil, "Only stop specified nodes") return cmd } tiup-1.16.3/components/dm/command/template.go000066400000000000000000000117401505422223000211260ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package command import ( "bytes" "fmt" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/spf13/cobra" ) // TemplateOptions contains the options for print topology template. type TemplateOptions struct { Full bool // print full template Local bool // print and render local template } // LocalTemplate contains the variables for print local template. 
type LocalTemplate struct { GlobalUser string // global.user in yaml template GlobalGroup string // global.group in yaml template GlobalSystemdMode string // global.systemd_mode in yaml template GlobalSSHPort int // global.ssh_port in yaml template GlobalDeployDir string // global.deploy_dir in yaml template GlobalDataDir string // global.data_dir in yaml template GlobalArch string // global.arch in yaml template MasterServers []string // master_servers in yaml template WorkerServers []string // worker_servers in yaml template MonitoringServers []string // monitoring_servers in yaml template GrafanaServers []string // grafana_servers in yaml template AlertManagerServers []string // alertmanager_servers in yaml template } func newTemplateCmd() *cobra.Command { opt := TemplateOptions{} localOpt := LocalTemplate{} cmd := &cobra.Command{ Use: "template", Short: "Print topology template", RunE: func(cmd *cobra.Command, args []string) error { name := "minimal.yaml" switch { case opt.Full: name = "topology.example.yaml" case opt.Local: name = "local.tpl" } fp := path.Join("examples", "dm", name) tpl, err := embed.ReadExample(fp) if err != nil { return err } if !opt.Local { // print example yaml and return fmt.Fprintln(cmd.OutOrStdout(), string(tpl)) return nil } // redner template // validate arch if localOpt.GlobalArch != "amd64" && localOpt.GlobalArch != "arm64" { return fmt.Errorf(`supported values are "amd64" or "arm64" in global.arch`) } // validate number of masters and workers if len(localOpt.MasterServers) < 3 { return fmt.Errorf( "at least 3 masters must be defined (given %d servers)", len(localOpt.MasterServers), ) } if len(localOpt.WorkerServers) < 3 { return fmt.Errorf( "at least 3 workers must be defined (given %d servers)", len(localOpt.WorkerServers), ) } tmpl, err := template.New(name).Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, &localOpt); err != nil { return err } 
fmt.Fprintln(cmd.OutOrStdout(), content.String()) return nil }, } cmd.Flags().BoolVar(&opt.Full, "full", false, "Print the full topology template for DM cluster.") cmd.Flags().BoolVar(&opt.Local, "local", false, "Print and render template for deploying a simple DM cluster locally.") // template values for rendering cmd.Flags().StringVar(&localOpt.GlobalUser, "user", "tidb", "The user who runs the tidb cluster.") cmd.Flags().StringVar(&localOpt.GlobalGroup, "group", "", "group is used to specify the group name the user belong to if it's not the same as user.") cmd.Flags().StringVar(&localOpt.GlobalSystemdMode, "systemd_mode", "system", "systemd_mode is used to select whether to use sudo permissions.") cmd.Flags().IntVar(&localOpt.GlobalSSHPort, "ssh-port", 22, "SSH port of servers in the managed cluster.") cmd.Flags().StringVar(&localOpt.GlobalDeployDir, "deploy-dir", "/tidb-deploy", "Storage directory for cluster deployment files, startup scripts, and configuration files.") cmd.Flags().StringVar(&localOpt.GlobalDataDir, "data-dir", "/tidb-data", "TiDB Cluster data storage directory.") cmd.Flags().StringVar(&localOpt.GlobalArch, "arch", "amd64", "Supported values: \"amd64\", \"arm64\".") cmd.Flags().StringSliceVar(&localOpt.MasterServers, "master-servers", []string{"172.19.0.101", "172.19.0.102", "172.19.0.103"}, "List of Master servers") cmd.Flags().StringSliceVar(&localOpt.WorkerServers, "worker-servers", []string{"172.19.0.101", "172.19.0.102", "172.19.0.103"}, "List of Worker servers") cmd.Flags().StringSliceVar(&localOpt.MonitoringServers, "monitoring-servers", []string{"172.19.0.101"}, "List of monitor servers") cmd.Flags().StringSliceVar(&localOpt.GrafanaServers, "grafana-servers", []string{"172.19.0.101"}, "List of grafana servers") cmd.Flags().StringSliceVar(&localOpt.AlertManagerServers, "alertmanager-servers", []string{"172.19.0.101"}, "List of alermanager servers") return cmd } 
tiup-1.16.3/components/dm/command/template_test.go000066400000000000000000000105101505422223000221570ustar00rootroot00000000000000package command import ( "bytes" "io" "strings" "testing" ) func Test_DMTemplateLocalCommandSingle(t *testing.T) { tests := []struct { optKey string optVal string expected string }{ {"user", "ubuntu", "user: \"ubuntu\""}, {"group", "ubuntu", "group: \"ubuntu\""}, {"ssh-port", "2222", "ssh_port: 2222"}, {"deploy-dir", "/path/to/deploy", "deploy_dir: \"/path/to/deploy\""}, {"data-dir", "/path/to/data", "data_dir: \"/path/to/data\""}, {"arch", "arm64", "arch: \"arm64\""}, {"master-servers", "a,b,c", "master_servers:\n - host: a\n - host: b\n - host: c"}, {"worker-servers", "a,b,c", "worker_servers:\n - host: a\n - host: b\n - host: c"}, {"monitoring-servers", "a,b,c", "monitoring_servers:\n - host: a\n - host: b\n - host: c"}, {"grafana-servers", "a,b,c", "grafana_servers:\n - host: a\n - host: b\n - host: c"}, {"alertmanager-servers", "a,b,c", "alertmanager_servers:\n - host: a\n - host: b\n - host: c"}, } for _, test := range tests { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local _ = cmd.Flags().Set(test.optKey, test.optVal) if err := cmd.Execute(); err != nil { t.Fatal(err) } out, err := io.ReadAll(b) if err != nil { t.Fatal(err) } if !strings.Contains(string(out), test.expected) { t.Fatalf("expected \"%s\", got \"%s\"", test.expected, string(out)) } } } func Test_DMTemplateLocalCommandMulti(t *testing.T) { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local _ = cmd.Flags().Set("user", "ubuntu") // add --user=ubuntu _ = cmd.Flags().Set("group", "ubuntu") // add --group=ubuntu _ = cmd.Flags().Set("master-servers", "m1,m2,m3") // add --master-servers=m1,m2,m3 _ = cmd.Flags().Set("worker-servers", "w1,w2,w3") // add --worker-servers=w1,w2,w3 _ = cmd.Flags().Set("alertmanager-servers", "a1,a2,a3") // 
add --alertmanager-servers=a1,a2,a3 if err := cmd.Execute(); err != nil { t.Fatal(err) } out, err := io.ReadAll(b) if err != nil { t.Fatal(err) } for _, b := range []bool{ strings.Contains(string(out), "user: \"ubuntu\""), strings.Contains(string(out), "group: \"ubuntu\""), strings.Contains(string(out), "master_servers:\n - host: m1\n - host: m2\n - host: m3"), strings.Contains(string(out), "worker_servers:\n - host: w1\n - host: w2\n - host: w3"), strings.Contains(string(out), "alertmanager_servers:\n - host: a1\n - host: a2\n - host: a3"), } { if !b { t.Fatalf("unexpected output. got \"%s\"", string(out)) } } } func Test_TemplateLocalCommandNoopt(t *testing.T) { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local if err := cmd.Execute(); err != nil { t.Fatal(err) } out, err := io.ReadAll(b) if err != nil { t.Fatal(err) } // check default output for _, b := range []bool{ strings.Contains(string(out), "user: \"tidb\""), strings.Contains(string(out), "ssh_port: 22"), strings.Contains(string(out), "deploy_dir: \"/tidb-deploy\""), strings.Contains(string(out), "data_dir: \"/tidb-data\""), strings.Contains(string(out), "arch: \"amd64\""), strings.Contains(string(out), "master_servers:\n - host: 172.19.0.101\n - host: 172.19.0.102\n - host: 172.19.0.103"), strings.Contains(string(out), "worker_servers:\n - host: 172.19.0.101\n - host: 172.19.0.102\n - host: 172.19.0.103"), strings.Contains(string(out), "monitoring_servers:\n - host: 172.19.0.101"), strings.Contains(string(out), "grafana_servers:\n - host: 172.19.0.101"), strings.Contains(string(out), "alertmanager_servers:\n - host: 172.19.0.101"), } { if !b { t.Fatalf("unexpected output. 
got \"%s\"", string(out)) } } } func Test_TemplateLocalCommandValidate(t *testing.T) { tests := []struct { optKey string optVal string }{ {"arch", "i386"}, {"master-servers", "m1,m2"}, {"worker-servers", "w1"}, } for _, test := range tests { cmd := newTemplateCmd() b := bytes.NewBufferString("") cmd.SetOut(b) _ = cmd.Flags().Set("local", "true") // add --local _ = cmd.Flags().Set(test.optKey, test.optVal) // add invalid option // should returns err if err := cmd.Execute(); err == nil { t.Fatal(err) } } } tiup-1.16.3/components/dm/command/upgrade.go000066400000000000000000000027731505422223000207500ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package command import ( "time" "github.com/spf13/cobra" ) func newUpgradeCmd() *cobra.Command { offlineMode := false ignoreVersionCheck := false cmd := &cobra.Command{ Use: "upgrade ", Short: "Upgrade a specified DM cluster", RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 2 { return cmd.Help() } return cm.Upgrade(args[0], args[1], nil, gOpt, skipConfirm, offlineMode, ignoreVersionCheck, time.Second*0) }, ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { switch len(args) { case 0: return shellCompGetClusterName(cm, toComplete) default: return nil, cobra.ShellCompDirectiveNoFileComp } }, } cmd.Flags().BoolVarP(&offlineMode, "offline", "", false, "Upgrade a stopped cluster") cmd.Flags().BoolVarP(&ignoreVersionCheck, "ignore-version-check", "", false, "Ignore checking if target version is higher than current version") return cmd } tiup-1.16.3/components/dm/main.go000066400000000000000000000012611505422223000166160ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "github.com/pingcap/tiup/components/dm/command" "github.com/pingcap/tiup/pkg/tui" ) func main() { tui.RegisterArg0("tiup dm") command.Execute() } tiup-1.16.3/components/dm/main_test.go000066400000000000000000000020671505422223000176620ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "os" "strings" "testing" ) // To build: // see build_integration_test in Makefile // To run: // tiup-dm.test -test.coverprofile={file} __DEVEL--i-heard-you-like-tests func TestMain(t *testing.T) { var ( args []string run bool ) for _, arg := range os.Args { switch { case arg == "__DEVEL--i-heard-you-like-tests": run = true case strings.HasPrefix(arg, "-test"): case strings.HasPrefix(arg, "__DEVEL"): default: args = append(args, arg) } } os.Args = args // fmt.Println(os.Args) if run { main() } } tiup-1.16.3/components/dm/spec/000077500000000000000000000000001505422223000162755ustar00rootroot00000000000000tiup-1.16.3/components/dm/spec/cluster.go000066400000000000000000000040661505422223000203130ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec import ( "fmt" "path/filepath" "reflect" cspec "github.com/pingcap/tiup/pkg/cluster/spec" ) var specManager *cspec.SpecManager // Metadata is the specification of generic cluster metadata type Metadata struct { User string `yaml:"user"` // the user to run and manage cluster on remote Version string `yaml:"dm_version"` // the version of TiDB cluster // EnableFirewall bool `yaml:"firewall"` Topology *Specification `yaml:"topology"` } var _ cspec.UpgradableMetadata = &Metadata{} // SetVersion implement UpgradableMetadata interface. func (m *Metadata) SetVersion(s string) { m.Version = s } // SetUser implement UpgradableMetadata interface. func (m *Metadata) SetUser(s string) { m.User = s } // GetTopology implements Metadata interface. func (m *Metadata) GetTopology() cspec.Topology { return m.Topology } // SetTopology implements Metadata interface. func (m *Metadata) SetTopology(topo cspec.Topology) { dmTopo, ok := topo.(*Specification) if !ok { panic(fmt.Sprintln("wrong type: ", reflect.TypeOf(topo))) } m.Topology = dmTopo } // GetBaseMeta implements Metadata interface. func (m *Metadata) GetBaseMeta() *cspec.BaseMeta { return &cspec.BaseMeta{ Version: m.Version, User: m.User, } } // GetSpecManager return the spec manager of dm cluster. func GetSpecManager() *cspec.SpecManager { if specManager == nil { specManager = cspec.NewSpec(filepath.Join(cspec.ProfileDir(), cspec.TiUPClusterDir), func() cspec.Metadata { return &Metadata{ Topology: new(Specification), } }) } return specManager } tiup-1.16.3/components/dm/spec/logic.go000066400000000000000000000353721505422223000177330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "fmt" "path/filepath" "strings" "sync" "time" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" ) // Components names supported by TiUP const ( ComponentDMMaster = spec.ComponentDMMaster ComponentDMWorker = spec.ComponentDMWorker ComponentPrometheus = spec.ComponentPrometheus ComponentGrafana = spec.ComponentGrafana ComponentAlertmanager = spec.ComponentAlertmanager ) type ( // InstanceSpec represent a instance specification InstanceSpec interface { Role() string SSH() (string, int) GetMainPort() int IsImported() bool IgnoreMonitorAgent() bool } ) // Component represents a component of the cluster. type Component = spec.Component // Instance represents an instance type Instance = spec.Instance // DMMasterComponent represents TiDB component. type DMMasterComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DMMasterComponent) Name() string { return ComponentDMMaster } // Role implements Component interface. func (c *DMMasterComponent) Role() string { return ComponentDMMaster } // Source implements Component interface. func (c *DMMasterComponent) Source() string { source := c.Topology.ComponentSources.Master if source != "" { return source } return ComponentDMMaster } // CalculateVersion implements the Component interface func (c *DMMasterComponent) CalculateVersion(clusterVersion string) string { return clusterVersion } // SetVersion implements Component interface. 
func (c *DMMasterComponent) SetVersion(version string) { // not supported now } // Instances implements Component interface. func (c *DMMasterComponent) Instances() []Instance { ins := make([]Instance, 0) for _, s := range c.Topology.Masters { ins = append(ins, &MasterInstance{ Name: s.Name, BaseInstance: spec.BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, Source: s.Source, Ports: []int{ s.Port, s.PeerPort, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: s.Status, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return spec.UptimeByHost(s.Host, s.Port, timeout, tlsCfg) }, Component: c, }, topo: c.Topology, }) } return ins } // MasterInstance represent the TiDB instance type MasterInstance struct { Name string spec.BaseInstance topo *Specification } // InitConfig implement Instance interface func (i *MasterInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { if err := i.BaseInstance.InitConfig(ctx, e, i.topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := i.topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*MasterSpec) scheme := utils.Ternary(enableTLS, "https", "http").(string) initialCluster := []string{} for _, masterspec := range i.topo.Masters { initialCluster = append(initialCluster, fmt.Sprintf("%s=%s", masterspec.Name, masterspec.GetAdvertisePeerURL(enableTLS))) } cfg := &scripts.DMMasterScript{ Name: spec.Name, V1SourcePath: spec.V1SourcePath, MasterAddr: utils.JoinHostPort(i.GetListenHost(), spec.Port), AdvertiseAddr: utils.JoinHostPort(spec.Host, spec.Port), PeerURL: fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.PeerPort)), AdvertisePeerURL: spec.GetAdvertisePeerURL(enableTLS), InitialCluster: strings.Join(initialCluster, ","), 
DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, NumaNode: spec.NumaNode, } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_dm-master_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_dm-master.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } _, _, err := e.Execute(ctx, "chmod +x "+dst, false) if err != nil { return err } if spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths); err != nil { return err } specConfig := spec.Config return i.MergeServerConfig(ctx, e, i.topo.ServerConfigs.Master, specConfig, paths) } // setTLSConfig set TLS Config to support enable/disable TLS // MasterInstance no need to configure TLS func (i *MasterInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { // set TLS configs if enableTLS { if configs == nil { configs = make(map[string]any) } configs["ssl-ca"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, "ca.crt", ) configs["ssl-cert"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["ssl-key"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // dm-master tls config list tlsConfigs := []string{ "ssl-ca", "ssl-cert", "ssl-key", } // delete TLS configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } // ScaleConfig deploy temporary config on scaling func (i *MasterInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo spec.Topology, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { if err := i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths); err != nil { return err } enableTLS := i.topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*MasterSpec) scheme := utils.Ternary(enableTLS, "https", "http").(string) masters := []string{} // master list from exist 
topo file for _, masterspec := range topo.(*Specification).Masters { masters = append(masters, utils.JoinHostPort(masterspec.Host, masterspec.Port)) } cfg := &scripts.DMMasterScaleScript{ Name: spec.Name, V1SourcePath: spec.V1SourcePath, MasterAddr: utils.JoinHostPort(i.GetListenHost(), spec.Port), AdvertiseAddr: utils.JoinHostPort(spec.Host, spec.Port), PeerURL: fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.PeerPort)), AdvertisePeerURL: spec.GetAdvertisePeerURL(enableTLS), Join: strings.Join(masters, ","), DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, NumaNode: spec.NumaNode, } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_dm-master_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_dm-master.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } return nil } // DMWorkerComponent represents DM worker component. type DMWorkerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DMWorkerComponent) Name() string { return ComponentDMWorker } // Role implements Component interface. func (c *DMWorkerComponent) Role() string { return ComponentDMWorker } // Source implements Component interface. func (c *DMWorkerComponent) Source() string { source := c.Topology.ComponentSources.Worker if source != "" { return source } return ComponentDMWorker } // CalculateVersion implements the Component interface func (c *DMWorkerComponent) CalculateVersion(clusterVersion string) string { return clusterVersion } // SetVersion implements Component interface. func (c *DMWorkerComponent) SetVersion(version string) { // not supported now } // Instances implements Component interface. 
func (c *DMWorkerComponent) Instances() []Instance {
	ins := make([]Instance, 0)
	// wrap every dm-worker spec from the topology in a WorkerInstance
	for _, s := range c.Topology.Workers {
		ins = append(ins, &WorkerInstance{
			Name: s.Name,
			BaseInstance: spec.BaseInstance{
				InstanceSpec: s,
				Name:         c.Name(),
				Host:         s.Host,
				ManageHost:   s.ManageHost,
				ListenHost:   c.Topology.BaseTopo().GlobalOptions.ListenHost,
				Port:         s.Port,
				SSHP:         s.SSHPort,
				Source:       s.Source,

				Ports: []int{
					s.Port,
				},
				Dirs: []string{
					s.DeployDir,
					s.DataDir,
				},
				StatusFn: s.Status,
				UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
					return spec.UptimeByHost(s.Host, s.Port, timeout, tlsCfg)
				},
				Component: c,
			},
			topo: c.Topology,
		})
	}
	return ins
}

// WorkerInstance represents a DM worker instance.
type WorkerInstance struct {
	Name string
	spec.BaseInstance
	topo *Specification
}

// InitConfig implements the Instance interface: it renders the dm-worker
// startup script locally, transfers it to the deploy host, makes it
// executable, then merges server configs (global worker config overridden
// by the instance-level config) and writes the result to the remote node.
func (i *WorkerInstance) InitConfig(
	ctx context.Context,
	e ctxt.Executor,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	if err := i.BaseInstance.InitConfig(ctx, e, i.topo.GlobalOptions, deployUser, paths); err != nil {
		return err
	}

	enableTLS := i.topo.GlobalOptions.TLSEnabled
	spec := i.InstanceSpec.(*WorkerSpec)

	// join address list of all dm-master nodes in the current topology
	masters := []string{}
	for _, masterspec := range i.topo.Masters {
		masters = append(masters, utils.JoinHostPort(masterspec.Host, masterspec.Port))
	}
	cfg := &scripts.DMWorkerScript{
		Name:          i.Name,
		WorkerAddr:    utils.JoinHostPort(i.GetListenHost(), spec.Port),
		AdvertiseAddr: utils.JoinHostPort(spec.Host, spec.Port),
		Join:          strings.Join(masters, ","),
		DeployDir:     paths.Deploy,
		LogDir:        paths.Log,
		NumaNode:      spec.NumaNode,
	}

	// render the run script into the local cache dir, then ship it to the
	// remote scripts dir and mark it executable
	fp := filepath.Join(paths.Cache, fmt.Sprintf("run_dm-worker_%s_%d.sh", i.GetHost(), i.GetPort()))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}
	dst := filepath.Join(paths.Deploy, "scripts", "run_dm-worker.sh")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}
	_, _, err := e.Execute(ctx, "chmod +x "+dst, false)
	if err != nil {
		return err
	}

	// add or strip ssl-* keys depending on whether TLS is enabled
	if spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths); err != nil {
		return err
	}

	specConfig := spec.Config
	return i.MergeServerConfig(ctx, e, i.topo.ServerConfigs.Worker, specConfig, paths)
}

// setTLSConfig sets the TLS config keys to support enabling/disabling TLS.
// When enableTLS is true it fills ssl-ca/ssl-cert/ssl-key with paths under
// the instance's deploy dir; otherwise it removes those keys from configs.
// The ctx parameter is currently unused (kept for interface symmetry with
// WorkerInstance — see the dm-master counterpart).
func (i *WorkerInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) {
	// set TLS configs
	if enableTLS {
		if configs == nil {
			configs = make(map[string]any)
		}
		configs["ssl-ca"] = fmt.Sprintf(
			"%s/tls/%s",
			paths.Deploy,
			"ca.crt",
		)
		configs["ssl-cert"] = fmt.Sprintf(
			"%s/tls/%s.crt",
			paths.Deploy,
			i.Role())
		configs["ssl-key"] = fmt.Sprintf(
			"%s/tls/%s.pem",
			paths.Deploy,
			i.Role())
	} else {
		// dm-worker tls config list
		tlsConfigs := []string{
			"ssl-ca",
			"ssl-cert",
			"ssl-key",
		}
		// delete TLS configs when TLS is disabled
		if configs != nil {
			for _, config := range tlsConfigs {
				delete(configs, config)
			}
		}
	}

	return configs, nil
}

// ScaleConfig deploys a temporary config on scaling: it renders InitConfig
// against the scale-out topology instead of the stored one, and the deferred
// assignment restores the original topology afterwards.
func (i *WorkerInstance) ScaleConfig(
	ctx context.Context,
	e ctxt.Executor,
	topo spec.Topology,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	s := i.topo
	defer func() {
		i.topo = s
	}()
	i.topo = topo.(*Specification)
	return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths)
}

// GetGlobalOptions returns the cluster-wide global options of the topology.
func (topo *Specification) GetGlobalOptions() spec.GlobalOptions {
	return topo.GlobalOptions
}

// GetMonitoredOptions returns the MonitoredOptions (may be nil).
func (topo *Specification) GetMonitoredOptions() *spec.MonitoredOptions {
	return topo.MonitoredOptions
}

// ComponentsByStopOrder returns components in the order they need to be stopped.
func (topo *Specification) ComponentsByStopOrder() (comps []Component) { comps = topo.ComponentsByStartOrder() // revert order i := 0 j := len(comps) - 1 for i < j { comps[i], comps[j] = comps[j], comps[i] i++ j-- } return } // ComponentsByStartOrder return component in the order need to start. func (topo *Specification) ComponentsByStartOrder() (comps []Component) { // "dm-master", "dm-worker" comps = append(comps, &DMMasterComponent{topo}) comps = append(comps, &DMWorkerComponent{topo}) comps = append(comps, &spec.MonitorComponent{Topology: topo}) // prometheus comps = append(comps, &spec.GrafanaComponent{Topology: topo}) comps = append(comps, &spec.AlertManagerComponent{Topology: topo}) return } // ComponentsByUpdateOrder return component in the order need to be updated. func (topo *Specification) ComponentsByUpdateOrder(curVer string) (comps []Component) { // "dm-master", "dm-worker" comps = append(comps, &DMMasterComponent{topo}) comps = append(comps, &DMWorkerComponent{topo}) comps = append(comps, &spec.MonitorComponent{Topology: topo}) comps = append(comps, &spec.GrafanaComponent{Topology: topo}) comps = append(comps, &spec.AlertManagerComponent{Topology: topo}) return } // IterComponent iterates all components in component starting order func (topo *Specification) IterComponent(fn func(comp Component)) { for _, comp := range topo.ComponentsByStartOrder() { fn(comp) } } // IterInstance iterates all instances in component starting order func (topo *Specification) IterInstance(fn func(instance Instance), concurrency ...int) { maxWorkers := 1 wg := sync.WaitGroup{} if len(concurrency) > 0 && concurrency[0] > 1 { maxWorkers = concurrency[0] } workerPool := make(chan struct{}, maxWorkers) for _, comp := range topo.ComponentsByStartOrder() { for _, inst := range comp.Instances() { wg.Add(1) workerPool <- struct{}{} go func(inst Instance) { defer func() { <-workerPool wg.Done() }() fn(inst) }(inst) } } wg.Wait() } // IterHost iterates one instance for each host 
func (topo *Specification) IterHost(fn func(instance Instance)) { hostMap := make(map[string]bool) for _, comp := range topo.ComponentsByStartOrder() { for _, inst := range comp.Instances() { host := inst.GetHost() _, ok := hostMap[host] if !ok { hostMap[host] = true fn(inst) } } } } tiup-1.16.3/components/dm/spec/topology_dm.go000066400000000000000000000620771505422223000211740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "fmt" "path/filepath" "reflect" "strings" "time" "github.com/creasty/defaults" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" ) const ( statusQueryTimeout = 2 * time.Second ) var ( globalOptionTypeName = reflect.TypeOf(GlobalOptions{}).Name() monitorOptionTypeName = reflect.TypeOf(MonitoredOptions{}).Name() serverConfigsTypeName = reflect.TypeOf(DMServerConfigs{}).Name() componentSourcesTypeName = reflect.TypeOf(ComponentSources{}).Name() ) func setDefaultDir(parent, role, port string, field reflect.Value) { if field.String() != "" { return } if defaults.CanUpdate(field.Interface()) { dir := fmt.Sprintf("%s-%s", role, port) field.Set(reflect.ValueOf(filepath.Join(parent, dir))) } } func findField(v reflect.Value, fieldName string) (int, bool) { for i := 0; i < v.NumField(); i++ { if v.Type().Field(i).Name == fieldName { return i, true } } return -1, 
false } // Skip global/monitored/job options func isSkipField(field reflect.Value) bool { if field.Kind() == reflect.Ptr { if field.IsZero() { return true } field = field.Elem() } tp := field.Type().Name() return tp == globalOptionTypeName || tp == monitorOptionTypeName || tp == serverConfigsTypeName || tp == componentSourcesTypeName } type ( // GlobalOptions of spec. GlobalOptions = spec.GlobalOptions // MonitoredOptions is the spec of Monitored MonitoredOptions = spec.MonitoredOptions // PrometheusSpec is the spec of Prometheus PrometheusSpec = spec.PrometheusSpec // GrafanaSpec is the spec of Grafana GrafanaSpec = spec.GrafanaSpec // AlertmanagerSpec is the spec of Alertmanager AlertmanagerSpec = spec.AlertmanagerSpec // ResourceControl is the spec of ResourceControl ResourceControl = meta.ResourceControl ) type ( // DMServerConfigs represents the server runtime configuration DMServerConfigs struct { Master map[string]any `yaml:"master"` Worker map[string]any `yaml:"worker"` Grafana map[string]string `yaml:"grafana"` } // ComponentSources represents the source of components ComponentSources struct { Master string `yaml:"master,omitempty"` Worker string `yaml:"worker,omitempty"` } // Specification represents the specification of topology.yaml Specification struct { GlobalOptions GlobalOptions `yaml:"global,omitempty" validate:"global:editable"` MonitoredOptions *MonitoredOptions `yaml:"monitored,omitempty" validate:"monitored:editable"` ComponentSources ComponentSources `yaml:"component_sources,omitempty" validate:"component_sources:editable"` ServerConfigs DMServerConfigs `yaml:"server_configs,omitempty" validate:"server_configs:ignore"` Masters []*MasterSpec `yaml:"master_servers"` Workers []*WorkerSpec `yaml:"worker_servers"` Monitors []*spec.PrometheusSpec `yaml:"monitoring_servers"` Grafanas []*spec.GrafanaSpec `yaml:"grafana_servers,omitempty"` Alertmanagers []*spec.AlertmanagerSpec `yaml:"alertmanager_servers,omitempty"` } ) // AllDMComponentNames contains 
the names of all dm components. // should include all components in ComponentsByStartOrder func AllDMComponentNames() (roles []string) { tp := &Specification{} tp.IterComponent(func(c Component) { roles = append(roles, c.Name()) }) return } // MasterSpec represents the Master topology specification in topology.yaml type MasterSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` // Use Name to get the name with a default value if it's empty. Name string `yaml:"name,omitempty"` Port int `yaml:"port,omitempty" default:"8261"` PeerPort int `yaml:"peer_port,omitempty" default:"8291"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` V1SourcePath string `yaml:"v1_source_path,omitempty"` } // Status queries current status of the instance func (s *MasterSpec) Status(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string { if timeout < time.Second { timeout = statusQueryTimeout } addr := utils.JoinHostPort(s.Host, s.Port) dc := api.NewDMMasterClient([]string{addr}, timeout, tlsCfg) isFound, isActive, isLeader, err := dc.GetMaster(s.Name) if err != nil { return "Down" } if !isFound { return "N/A" } if !isActive { return "Unhealthy" } res := "Healthy" if isLeader { res += "|L" } return res } // Role returns the component 
role of the instance func (s *MasterSpec) Role() string { return ComponentDMMaster } // SSH returns the host and SSH port of the instance func (s *MasterSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *MasterSpec) GetMainPort() int { return s.Port } // IsImported returns if the node is imported from TiDB-Ansible func (s *MasterSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *MasterSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // GetAdvertisePeerURL returns AdvertisePeerURL func (s *MasterSpec) GetAdvertisePeerURL(enableTLS bool) string { scheme := utils.Ternary(enableTLS, "https", "http").(string) return fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(s.Host, s.PeerPort)) } // WorkerSpec represents the Master topology specification in topology.yaml type WorkerSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` // Use Name to get the name with a default value if it's empty. 
Name string `yaml:"name,omitempty"` Port int `yaml:"port,omitempty" default:"8262"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` } // Status queries current status of the instance func (s *WorkerSpec) Status(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, masterList ...string) string { if len(masterList) < 1 { return "N/A" } if timeout < time.Second { timeout = statusQueryTimeout } dc := api.NewDMMasterClient(masterList, timeout, tlsCfg) stage, err := dc.GetWorker(s.Name) if err != nil { return "Down" } if stage == "" { return "N/A" } return stage } // Role returns the component role of the instance func (s *WorkerSpec) Role() string { return ComponentDMWorker } // SSH returns the host and SSH port of the instance func (s *WorkerSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *WorkerSpec) GetMainPort() int { return s.Port } // IsImported returns if the node is imported from TiDB-Ansible func (s *WorkerSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *WorkerSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // UnmarshalYAML sets default values when unmarshaling the topology file func (s *Specification) UnmarshalYAML(unmarshal func(any) error) error { type topology Specification if err := unmarshal((*topology)(s)); err != nil { return err } if err := defaults.Set(s); err 
!= nil { return errors.Trace(err) } if s.MonitoredOptions != nil { // Set monitored options if s.MonitoredOptions.DeployDir == "" { s.MonitoredOptions.DeployDir = filepath.Join(s.GlobalOptions.DeployDir, fmt.Sprintf("%s-%d", spec.RoleMonitor, s.MonitoredOptions.NodeExporterPort)) } if s.MonitoredOptions.DataDir == "" { s.MonitoredOptions.DataDir = filepath.Join(s.GlobalOptions.DataDir, fmt.Sprintf("%s-%d", spec.RoleMonitor, s.MonitoredOptions.NodeExporterPort)) } if s.MonitoredOptions.LogDir == "" { s.MonitoredOptions.LogDir = "log" } if !strings.HasPrefix(s.MonitoredOptions.LogDir, "/") && !strings.HasPrefix(s.MonitoredOptions.LogDir, s.MonitoredOptions.DeployDir) { s.MonitoredOptions.LogDir = filepath.Join(s.MonitoredOptions.DeployDir, s.MonitoredOptions.LogDir) } } if err := fillDMCustomDefaults(&s.GlobalOptions, s); err != nil { return err } return s.Validate() } // platformConflictsDetect checks for conflicts in topology for different OS / Arch // for set to the same host / IP func (s *Specification) platformConflictsDetect() error { type ( conflict struct { os string arch string cfg string } ) platformStats := map[string]conflict{} topoSpec := reflect.ValueOf(s).Elem() topoType := reflect.TypeOf(s).Elem() for i := 0; i < topoSpec.NumField(); i++ { if isSkipField(topoSpec.Field(i)) { continue } compSpecs := topoSpec.Field(i) for index := 0; index < compSpecs.Len(); index++ { compSpec := reflect.Indirect(compSpecs.Index(index)) // skip nodes imported from TiDB-Ansible if compSpec.Addr().Interface().(InstanceSpec).IsImported() { continue } // check hostname host := compSpec.FieldByName("Host").String() cfg := topoType.Field(i).Tag.Get("yaml") if host == "" { return errors.Errorf("`%s` contains empty host field", cfg) } // platform conflicts stat := conflict{ cfg: cfg, } if j, found := findField(compSpec, "OS"); found { stat.os = compSpec.Field(j).String() } if j, found := findField(compSpec, "Arch"); found { stat.arch = compSpec.Field(j).String() } prev, exist 
:= platformStats[host] if exist { if prev.os != stat.os || prev.arch != stat.arch { return &meta.ValidateErr{ Type: meta.TypeMismatch, Target: "platform", LHS: fmt.Sprintf("%s:%s/%s", prev.cfg, prev.os, prev.arch), RHS: fmt.Sprintf("%s:%s/%s", stat.cfg, stat.os, stat.arch), Value: host, } } } platformStats[host] = stat } } return nil } func (s *Specification) portConflictsDetect() error { type ( usedPort struct { host string port int } conflict struct { tp string cfg string } ) portTypes := []string{ "Port", "StatusPort", "PeerPort", "ClientPort", "WebPort", "TCPPort", "HTTPPort", "ClusterPort", } portStats := map[usedPort]conflict{} uniqueHosts := set.NewStringSet() topoSpec := reflect.ValueOf(s).Elem() topoType := reflect.TypeOf(s).Elem() for i := 0; i < topoSpec.NumField(); i++ { if isSkipField(topoSpec.Field(i)) { continue } compSpecs := topoSpec.Field(i) for index := 0; index < compSpecs.Len(); index++ { compSpec := reflect.Indirect(compSpecs.Index(index)) // skip nodes imported from TiDB-Ansible if compSpec.Addr().Interface().(InstanceSpec).IsImported() { continue } // check hostname host := compSpec.FieldByName("Host").String() cfg := topoType.Field(i).Tag.Get("yaml") if host == "" { return errors.Errorf("`%s` contains empty host field", cfg) } uniqueHosts.Insert(host) // Ports conflicts for _, portType := range portTypes { if j, found := findField(compSpec, portType); found { item := usedPort{ host: host, port: int(compSpec.Field(j).Int()), } tp := compSpec.Type().Field(j).Tag.Get("yaml") prev, exist := portStats[item] if exist { return &meta.ValidateErr{ Type: meta.TypeConflict, Target: "port", LHS: fmt.Sprintf("%s:%s.%s", prev.cfg, item.host, prev.tp), RHS: fmt.Sprintf("%s:%s.%s", cfg, item.host, tp), Value: item.port, } } portStats[item] = conflict{ tp: tp, cfg: cfg, } } } } } // Port conflicts in monitored components monitoredPortTypes := []string{ "NodeExporterPort", "BlackboxExporterPort", } monitoredOpt := topoSpec.FieldByName(monitorOptionTypeName) 
if monitoredOpt.IsZero() { return nil } monitoredOpt = monitoredOpt.Elem() for host := range uniqueHosts { cfg := "monitored" for _, portType := range monitoredPortTypes { f := monitoredOpt.FieldByName(portType) item := usedPort{ host: host, port: int(f.Int()), } ft, found := monitoredOpt.Type().FieldByName(portType) if !found { return errors.Errorf("incompatible change `%s.%s`", monitorOptionTypeName, portType) } // `yaml:"node_exporter_port,omitempty"` tp := strings.Split(ft.Tag.Get("yaml"), ",")[0] prev, exist := portStats[item] if exist { return &meta.ValidateErr{ Type: meta.TypeConflict, Target: "port", LHS: fmt.Sprintf("%s:%s.%s", prev.cfg, item.host, prev.tp), RHS: fmt.Sprintf("%s:%s.%s", cfg, item.host, tp), Value: item.port, } } portStats[item] = conflict{ tp: tp, cfg: cfg, } } } return nil } func (s *Specification) dirConflictsDetect() error { type ( usedDir struct { host string dir string } conflict struct { tp string cfg string } ) dirTypes := []string{ "DataDir", "DeployDir", } // usedInfo => type var ( dirStats = map[usedDir]conflict{} uniqueHosts = set.NewStringSet() ) topoSpec := reflect.ValueOf(s).Elem() topoType := reflect.TypeOf(s).Elem() for i := 0; i < topoSpec.NumField(); i++ { if isSkipField(topoSpec.Field(i)) { continue } compSpecs := topoSpec.Field(i) for index := 0; index < compSpecs.Len(); index++ { compSpec := reflect.Indirect(compSpecs.Index(index)) // skip nodes imported from TiDB-Ansible if compSpec.Addr().Interface().(InstanceSpec).IsImported() { continue } // check hostname host := compSpec.FieldByName("Host").String() cfg := topoType.Field(i).Tag.Get("yaml") if host == "" { return errors.Errorf("`%s` contains empty host field", cfg) } uniqueHosts.Insert(host) // Directory conflicts for _, dirType := range dirTypes { if j, found := findField(compSpec, dirType); found { item := usedDir{ host: host, dir: compSpec.Field(j).String(), } // data_dir is relative to deploy_dir by default, so they can be with // same (sub) paths as long as 
the deploy_dirs are different if item.dir != "" && !strings.HasPrefix(item.dir, "/") { continue } // `yaml:"data_dir,omitempty"` tp := strings.Split(compSpec.Type().Field(j).Tag.Get("yaml"), ",")[0] prev, exist := dirStats[item] if exist { return &meta.ValidateErr{ Type: meta.TypeConflict, Target: "directory", LHS: fmt.Sprintf("%s:%s.%s", prev.cfg, item.host, prev.tp), RHS: fmt.Sprintf("%s:%s.%s", cfg, item.host, tp), Value: item.dir, } } dirStats[item] = conflict{ tp: tp, cfg: cfg, } } } } } return nil } // CountDir counts for dir paths used by any instance in the cluster with the same // prefix, useful to find potential path conflicts func (s *Specification) CountDir(targetHost, dirPrefix string) int { dirTypes := []string{ "DataDir", "DeployDir", "LogDir", } // host-path -> count dirStats := make(map[string]int) count := 0 topoSpec := reflect.ValueOf(s).Elem() dirPrefix = spec.Abs(s.GlobalOptions.User, dirPrefix) for i := 0; i < topoSpec.NumField(); i++ { if isSkipField(topoSpec.Field(i)) { continue } compSpecs := topoSpec.Field(i) for index := 0; index < compSpecs.Len(); index++ { compSpec := reflect.Indirect(compSpecs.Index(index)) // Directory conflicts for _, dirType := range dirTypes { if j, found := findField(compSpec, dirType); found { dir := compSpec.Field(j).String() host := compSpec.FieldByName("Host").String() switch dirType { // the same as in logic.go for (*instance) case "DataDir": deployDir := compSpec.FieldByName("DeployDir").String() // the default data_dir is relative to deploy_dir if dir != "" && !strings.HasPrefix(dir, "/") { dir = filepath.Join(deployDir, dir) } case "LogDir": deployDir := compSpec.FieldByName("DeployDir").String() field := compSpec.FieldByName("LogDir") if field.IsValid() { dir = field.Interface().(string) } if dir == "" { dir = "log" } if !strings.HasPrefix(dir, "/") { dir = filepath.Join(deployDir, dir) } } dir = spec.Abs(s.GlobalOptions.User, dir) dirStats[host+dir]++ } } } } for k, v := range dirStats { if k == 
targetHost+dirPrefix || strings.HasPrefix(k, targetHost+dirPrefix+"/") { count += v } } return count } // TLSConfig generates a tls.Config for the specification as needed func (s *Specification) TLSConfig(dir string) (*tls.Config, error) { if !s.GlobalOptions.TLSEnabled { return nil, nil } return spec.LoadClientCert(dir) } // Validate validates the topology specification and produce error if the // specification invalid (e.g: port conflicts or directory conflicts) func (s *Specification) Validate() error { if err := s.platformConflictsDetect(); err != nil { return err } if err := s.portConflictsDetect(); err != nil { return err } if err := s.dirConflictsDetect(); err != nil { return err } return spec.RelativePathDetect(s, isSkipField) } // Type implements Topology interface. func (s *Specification) Type() string { return spec.TopoTypeDM } // BaseTopo implements Topology interface. func (s *Specification) BaseTopo() *spec.BaseTopo { return &spec.BaseTopo{ GlobalOptions: &s.GlobalOptions, MonitoredOptions: s.GetMonitoredOptions(), MasterList: s.GetMasterListWithManageHost(), Monitors: s.Monitors, Grafanas: s.Grafanas, Alertmanagers: s.Alertmanagers, } } // NewPart implements ScaleOutTopology interface. func (s *Specification) NewPart() spec.Topology { return &Specification{ GlobalOptions: s.GlobalOptions, MonitoredOptions: s.MonitoredOptions, ServerConfigs: s.ServerConfigs, } } // MergeTopo implements ScaleOutTopology interface. 
func (s *Specification) MergeTopo(rhs spec.Topology) spec.Topology { other, ok := rhs.(*Specification) if !ok { panic("topo should be DM Topology") } return s.Merge(other) } // GetMasterListWithManageHost returns a list of Master API hosts of the current cluster func (s *Specification) GetMasterListWithManageHost() []string { var masterList []string for _, master := range s.Masters { host := master.Host if master.ManageHost != "" { host = master.ManageHost } masterList = append(masterList, utils.JoinHostPort(host, master.Port)) } return masterList } // FillHostArchOrOS fills the topology with the given host->arch func (s *Specification) FillHostArchOrOS(hostArch map[string]string, fullType spec.FullHostType) error { return spec.FillHostArchOrOS(s, hostArch, fullType) } // Merge returns a new Topology which sum old ones func (s *Specification) Merge(that spec.Topology) spec.Topology { spec := that.(*Specification) return &Specification{ GlobalOptions: s.GlobalOptions, MonitoredOptions: s.MonitoredOptions, ServerConfigs: s.ServerConfigs, Masters: append(s.Masters, spec.Masters...), Workers: append(s.Workers, spec.Workers...), Monitors: append(s.Monitors, spec.Monitors...), Grafanas: append(s.Grafanas, spec.Grafanas...), Alertmanagers: append(s.Alertmanagers, spec.Alertmanagers...), } } // fillDefaults tries to fill custom fields to their default values func fillDMCustomDefaults(globalOptions *GlobalOptions, data any) error { v := reflect.ValueOf(data).Elem() t := v.Type() var err error for i := 0; i < t.NumField(); i++ { if err = setDMCustomDefaults(globalOptions, v.Field(i)); err != nil { return err } } return nil } func setDMCustomDefaults(globalOptions *GlobalOptions, field reflect.Value) error { if !field.CanSet() || isSkipField(field) { return nil } switch field.Kind() { case reflect.Slice: for i := 0; i < field.Len(); i++ { if err := setDMCustomDefaults(globalOptions, field.Index(i)); err != nil { return err } } case reflect.Struct: ref := 
reflect.New(field.Type()) ref.Elem().Set(field) if err := fillDMCustomDefaults(globalOptions, ref.Interface()); err != nil { return err } field.Set(ref.Elem()) case reflect.Ptr: if err := setDMCustomDefaults(globalOptions, field.Elem()); err != nil { return err } } if field.Kind() != reflect.Struct { return nil } for j := 0; j < field.NumField(); j++ { switch field.Type().Field(j).Name { case "SSHPort": if field.Field(j).Int() != 0 { continue } field.Field(j).Set(reflect.ValueOf(globalOptions.SSHPort)) case "Name": if field.Field(j).String() != "" { continue } host := field.FieldByName("Host").String() port := field.FieldByName("Port").Int() field.Field(j).Set(reflect.ValueOf(fmt.Sprintf("dm-%s-%d", host, port))) case "DataDir": dataDir := field.Field(j).String() if dataDir != "" { // already have a value, skip filling default values continue } // If the data dir in global options is an obsolute path, it appends to // the global and has a comp-port sub directory if strings.HasPrefix(globalOptions.DataDir, "/") { field.Field(j).Set(reflect.ValueOf(filepath.Join( globalOptions.DataDir, fmt.Sprintf("%s-%s", field.Addr().Interface().(InstanceSpec).Role(), getPort(field)), ))) continue } // If the data dir in global options is empty or a relative path, keep it be relative // Our run_*.sh start scripts are run inside deploy_path, so the final location // will be deploy_path/global.data_dir // (the default value of global.data_dir is "data") if globalOptions.DataDir == "" { field.Field(j).Set(reflect.ValueOf("data")) } else { field.Field(j).Set(reflect.ValueOf(globalOptions.DataDir)) } case "DeployDir": setDefaultDir(globalOptions.DeployDir, field.Addr().Interface().(InstanceSpec).Role(), getPort(field), field.Field(j)) case "LogDir": if field.Field(j).String() == "" && defaults.CanUpdate(field.Field(j).Interface()) { field.Field(j).Set(reflect.ValueOf(globalOptions.LogDir)) } case "Arch": switch strings.ToLower(field.Field(j).String()) { // replace "x86_64" with amd64, 
they are the same in our repo case "x86_64": field.Field(j).Set(reflect.ValueOf("amd64")) // replace "aarch64" with arm64 case "aarch64": field.Field(j).Set(reflect.ValueOf("arm64")) } // convert to lower case if field.Field(j).String() != "" { field.Field(j).Set(reflect.ValueOf(strings.ToLower(field.Field(j).String()))) } case "OS": // convert to lower case if field.Field(j).String() != "" { field.Field(j).Set(reflect.ValueOf(strings.ToLower(field.Field(j).String()))) } } } return nil } func getPort(v reflect.Value) string { for i := 0; i < v.NumField(); i++ { switch v.Type().Field(i).Name { case "Port", "ClientPort", "WebPort", "TCPPort", "NodeExporterPort": return fmt.Sprintf("%d", v.Field(i).Int()) } } return "" } // GetGrafanaConfig returns global grafana configurations func (s *Specification) GetGrafanaConfig() map[string]string { return s.ServerConfigs.Grafana } tiup-1.16.3/components/dm/spec/topology_dm_test.go000066400000000000000000000372051505422223000222260ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "os" "testing" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestDefaultDataDir(t *testing.T) { // Test with without global DataDir. 
topo := new(Specification) topo.Masters = append(topo.Masters, &MasterSpec{Host: "1.1.1.1", Port: 1111}) topo.Workers = append(topo.Workers, &WorkerSpec{Host: "1.1.2.1", Port: 2221}) data, err := yaml.Marshal(topo) require.NoError(t, err) // Check default value. topo = new(Specification) err = yaml.Unmarshal(data, topo) require.NoError(t, err) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "data", topo.Masters[0].DataDir) require.Equal(t, "data", topo.Workers[0].DataDir) // Can keep the default value. data, err = yaml.Marshal(topo) require.NoError(t, err) topo = new(Specification) err = yaml.Unmarshal(data, topo) require.NoError(t, err) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "data", topo.Masters[0].DataDir) require.Equal(t, "data", topo.Workers[0].DataDir) // Test with global DataDir. topo = new(Specification) topo.GlobalOptions.DataDir = "/gloable_data" topo.Masters = append(topo.Masters, &MasterSpec{Host: "1.1.1.1", Port: 1111}) topo.Masters = append(topo.Masters, &MasterSpec{Host: "1.1.1.2", Port: 1112, DataDir: "/my_data"}) topo.Workers = append(topo.Workers, &WorkerSpec{Host: "1.1.2.1", Port: 2221}) topo.Workers = append(topo.Workers, &WorkerSpec{Host: "1.1.2.2", Port: 2222, DataDir: "/my_data"}) data, err = yaml.Marshal(topo) require.NoError(t, err) topo = new(Specification) err = yaml.Unmarshal(data, topo) require.NoError(t, err) require.Equal(t, "/gloable_data", topo.GlobalOptions.DataDir) require.Equal(t, "/gloable_data/dm-master-1111", topo.Masters[0].DataDir) require.Equal(t, "/my_data", topo.Masters[1].DataDir) require.Equal(t, "/gloable_data/dm-worker-2221", topo.Workers[0].DataDir) require.Equal(t, "/my_data", topo.Workers[1].DataDir) } func TestGlobalOptions(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" master_servers: - host: 172.16.5.138 deploy_dir: "master-deploy" worker_servers: - host: 
172.16.5.53 data_dir: "worker-data" `), &topo) require.NoError(t, err) require.Equal(t, "test1", topo.GlobalOptions.User) require.Equal(t, 220, topo.GlobalOptions.SSHPort) require.Equal(t, 220, topo.Masters[0].SSHPort) require.Equal(t, "master-deploy", topo.Masters[0].DeployDir) require.Equal(t, 220, topo.Workers[0].SSHPort) require.Equal(t, "test-deploy/dm-worker-8262", topo.Workers[0].DeployDir) require.Equal(t, "worker-data", topo.Workers[0].DataDir) } func TestDirectoryConflicts(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" master_servers: - host: 172.16.5.138 deploy_dir: "/test-1" worker_servers: - host: 172.16.5.138 data_dir: "/test-1" `), &topo) require.Error(t, err) require.Equal(t, "directory conflict for '/test-1' between 'master_servers:172.16.5.138.deploy_dir' and 'worker_servers:172.16.5.138.data_dir'", err.Error()) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "/test-data" master_servers: - host: 172.16.5.138 data_dir: "test-1" worker_servers: - host: 172.16.5.138 data_dir: "test-1" `), &topo) require.NoError(t, err) } func TestPortConflicts(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" master_servers: - host: 172.16.5.138 peer_port: 1234 worker_servers: - host: 172.16.5.138 port: 1234 `), &topo) require.Error(t, err) require.Equal(t, "port conflict for '1234' between 'master_servers:172.16.5.138.peer_port,omitempty' and 'worker_servers:172.16.5.138.port,omitempty'", err.Error()) } func TestPlatformConflicts(t *testing.T) { // aarch64 and arm64 are equal topo := Specification{} err := yaml.Unmarshal([]byte(` global: os: "linux" arch: "aarch64" master_servers: - host: 172.16.5.138 arch: "arm64" worker_servers: - host: 172.16.5.138 arch: "aarch64" `), &topo) require.NoError(t, err) // 
different arch defined for the same host topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" master_servers: - host: 172.16.5.138 arch: "aarch64" os: "linux" worker_servers: - host: 172.16.5.138 arch: "amd64" os: "linux" `), &topo) require.Error(t, err) require.Equal(t, "platform mismatch for '172.16.5.138' between 'master_servers:linux/arm64' and 'worker_servers:linux/amd64'", err.Error()) // different os defined for the same host topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" arch: "aarch64" master_servers: - host: 172.16.5.138 os: "darwin" arch: "aarch64" worker_servers: - host: 172.16.5.138 os: "linux" arch: "aarch64" `), &topo) require.Error(t, err) require.Equal(t, "platform mismatch for '172.16.5.138' between 'master_servers:darwin/arm64' and 'worker_servers:linux/arm64'", err.Error()) } func TestCountDir(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" master_servers: - host: 172.16.5.138 deploy_dir: "master-deploy" data_dir: "/test-data/data-1" worker_servers: - host: 172.16.5.53 data_dir: "test-1" `), &topo) require.NoError(t, err) cnt := topo.CountDir("172.16.5.53", "test-deploy/dm-worker-8262") require.Equal(t, 3, cnt) cnt = topo.CountDir("172.16.5.138", "/test-data/data") require.Equal(t, 0, cnt) // should not match partial path err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "/test-deploy" master_servers: - host: 172.16.5.138 deploy_dir: "master-deploy" data_dir: "/test-data/data-1" worker_servers: - host: 172.16.5.138 data_dir: "/test-data/data-2" `), &topo) require.NoError(t, err) cnt = topo.CountDir("172.16.5.138", "/test-deploy/dm-worker-8262") require.Equal(t, 2, cnt) cnt = topo.CountDir("172.16.5.138", "") require.Equal(t, 2, cnt) cnt = topo.CountDir("172.16.5.138", "test-data") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.16.5.138", "/test-data") require.Equal(t, 2, cnt) err = 
yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "/test-deploy" data_dir: "/test-data" master_servers: - host: 172.16.5.138 data_dir: "data-1" worker_servers: - host: 172.16.5.138 data_dir: "data-2" - host: 172.16.5.53 `), &topo) require.NoError(t, err) // if per-instance data_dir is set, the global data_dir is ignored, and if it // is a relative path, it will be under the instance's deploy_dir cnt = topo.CountDir("172.16.5.138", "/test-deploy/dm-worker-8262") require.Equal(t, 3, cnt) cnt = topo.CountDir("172.16.5.138", "") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.16.5.53", "/test-data") require.Equal(t, 1, cnt) } func withTempFile(t *testing.T, content string, fn func(string)) { file, err := os.CreateTemp("/tmp", "topology-test") require.NoError(t, err) defer os.Remove(file.Name()) _, err = file.WriteString(content) require.NoError(t, err) file.Close() fn(file.Name()) } func with2TempFile(t *testing.T, content1, content2 string, fn func(string, string)) { withTempFile(t, content1, func(file1 string) { withTempFile(t, content2, func(file2 string) { fn(file1, file2) }) }) } func merge4test(base, scale string) (*Specification, error) { baseTopo := Specification{} if err := spec.ParseTopologyYaml(base, &baseTopo); err != nil { return nil, err } scaleTopo := baseTopo.NewPart() if err := spec.ParseTopologyYaml(scale, scaleTopo); err != nil { return nil, err } mergedTopo := baseTopo.MergeTopo(scaleTopo) if err := mergedTopo.Validate(); err != nil { return nil, err } return mergedTopo.(*Specification), nil } func TestRelativePath(t *testing.T) { // base test withTempFile(t, ` master_servers: - host: 172.16.5.140 worker_servers: - host: 172.16.5.140 `, func(file string) { topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) require.NoError(t, err) spec.ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/deploy/dm-master-8261", topo.Masters[0].DeployDir) require.Equal(t, "/home/tidb/deploy/dm-worker-8262", 
topo.Workers[0].DeployDir) }) // test data dir & log dir withTempFile(t, ` master_servers: - host: 172.16.5.140 deploy_dir: my-deploy data_dir: my-data log_dir: my-log `, func(file string) { topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) require.NoError(t, err) spec.ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/my-deploy", topo.Masters[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/my-data", topo.Masters[0].DataDir) require.Equal(t, "/home/tidb/my-deploy/my-log", topo.Masters[0].LogDir) }) // test global options, case 1 withTempFile(t, ` global: deploy_dir: my-deploy master_servers: - host: 172.16.5.140 `, func(file string) { topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) require.NoError(t, err) spec.ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/my-deploy/dm-master-8261", topo.Masters[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/dm-master-8261/data", topo.Masters[0].DataDir) require.Equal(t, "", topo.Masters[0].LogDir) }) // test global options, case 2 withTempFile(t, ` global: deploy_dir: my-deploy master_servers: - host: 172.16.5.140 worker_servers: - host: 172.16.5.140 port: 20160 - host: 172.16.5.140 port: 20161 `, func(file string) { topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) require.NoError(t, err) spec.ExpandRelativeDir(&topo) require.Equal(t, "my-deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20160", topo.Workers[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20160/data", topo.Workers[0].DataDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20161", topo.Workers[1].DeployDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20161/data", topo.Workers[1].DataDir) }) // test global options, case 3 withTempFile(t, ` global: deploy_dir: my-deploy master_servers: - host: 172.16.5.140 worker_servers: - host: 172.16.5.140 port: 20160 data_dir: my-data log_dir: 
my-log - host: 172.16.5.140 port: 20161 `, func(file string) { topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) require.NoError(t, err) spec.ExpandRelativeDir(&topo) require.Equal(t, "my-deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20160", topo.Workers[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20160/my-data", topo.Workers[0].DataDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20160/my-log", topo.Workers[0].LogDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20161", topo.Workers[1].DeployDir) require.Equal(t, "/home/tidb/my-deploy/dm-worker-20161/data", topo.Workers[1].DataDir) require.Equal(t, "", topo.Workers[1].LogDir) }) // test global options, case 4 withTempFile(t, ` global: data_dir: my-global-data log_dir: my-global-log master_servers: - host: 172.16.5.140 worker_servers: - host: 172.16.5.140 port: 20160 data_dir: my-local-data log_dir: my-local-log - host: 172.16.5.140 port: 20161 `, func(file string) { topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) require.NoError(t, err) spec.ExpandRelativeDir(&topo) require.Equal(t, "deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "my-global-data", topo.GlobalOptions.DataDir) require.Equal(t, "my-global-log", topo.GlobalOptions.LogDir) require.Equal(t, "/home/tidb/deploy/dm-worker-20160", topo.Workers[0].DeployDir) require.Equal(t, "/home/tidb/deploy/dm-worker-20160/my-local-data", topo.Workers[0].DataDir) require.Equal(t, "/home/tidb/deploy/dm-worker-20160/my-local-log", topo.Workers[0].LogDir) require.Equal(t, "/home/tidb/deploy/dm-worker-20161", topo.Workers[1].DeployDir) require.Equal(t, "/home/tidb/deploy/dm-worker-20161/my-global-data", topo.Workers[1].DataDir) require.Equal(t, "/home/tidb/deploy/dm-worker-20161/my-global-log", topo.Workers[1].LogDir) }) } func TestTopologyMerge(t *testing.T) { // base test with2TempFile(t, ` master_servers: - 
host: 172.16.5.140 worker_servers: - host: 172.16.5.140 `, ` worker_servers: - host: 172.16.5.139 `, func(base, scale string) { topo, err := merge4test(base, scale) require.NoError(t, err) spec.ExpandRelativeDir(topo) require.Equal(t, "/home/tidb/deploy/dm-worker-8262", topo.Workers[0].DeployDir) require.Equal(t, "/home/tidb/deploy/dm-worker-8262/data", topo.Workers[0].DataDir) require.Equal(t, "", topo.Workers[0].LogDir) require.Equal(t, "/home/tidb/deploy/dm-worker-8262", topo.Workers[1].DeployDir) require.Equal(t, "/home/tidb/deploy/dm-worker-8262/data", topo.Workers[1].DataDir) require.Equal(t, "", topo.Workers[1].LogDir) }) // test global option overwrite with2TempFile(t, ` global: user: test deploy_dir: /my-global-deploy master_servers: - host: 172.16.5.140 worker_servers: - host: 172.16.5.140 log_dir: my-local-log data_dir: my-local-data - host: 172.16.5.175 deploy_dir: flash-deploy - host: 172.16.5.141 `, ` worker_servers: - host: 172.16.5.139 deploy_dir: flash-deploy - host: 172.16.5.134 `, func(base, scale string) { topo, err := merge4test(base, scale) require.NoError(t, err) spec.ExpandRelativeDir(topo) require.Equal(t, "/my-global-deploy/dm-worker-8262", topo.Workers[0].DeployDir) require.Equal(t, "/my-global-deploy/dm-worker-8262/my-local-data", topo.Workers[0].DataDir) require.Equal(t, "/my-global-deploy/dm-worker-8262/my-local-log", topo.Workers[0].LogDir) require.Equal(t, "/home/test/flash-deploy", topo.Workers[1].DeployDir) require.Equal(t, "/home/test/flash-deploy/data", topo.Workers[1].DataDir) require.Equal(t, "/home/test/flash-deploy", topo.Workers[3].DeployDir) require.Equal(t, "/home/test/flash-deploy/data", topo.Workers[3].DataDir) require.Equal(t, "/my-global-deploy/dm-worker-8262", topo.Workers[2].DeployDir) require.Equal(t, "/my-global-deploy/dm-worker-8262/data", topo.Workers[2].DataDir) require.Equal(t, "/my-global-deploy/dm-worker-8262", topo.Workers[4].DeployDir) require.Equal(t, "/my-global-deploy/dm-worker-8262/data", 
topo.Workers[4].DataDir) }) } func TestMonitorLogDir(t *testing.T) { withTempFile(t, ` monitored: node_exporter_port: 39100 blackbox_exporter_port: 39115 deploy_dir: "test-deploy" log_dir: "test-deploy/log" `, func(file string) { topo := Specification{} err := spec.ParseTopologyYaml(file, &topo) require.NoError(t, err) require.Equal(t, 39100, topo.MonitoredOptions.NodeExporterPort) require.Equal(t, 39115, topo.MonitoredOptions.BlackboxExporterPort) require.Equal(t, "test-deploy/log", topo.MonitoredOptions.LogDir) require.Equal(t, "test-deploy", topo.MonitoredOptions.DeployDir) }) } tiup-1.16.3/components/dm/task/000077500000000000000000000000001505422223000163055ustar00rootroot00000000000000tiup-1.16.3/components/dm/task/update_dm_meta.go000066400000000000000000000062731505422223000216140ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "strings" dmspec "github.com/pingcap/tiup/components/dm/spec" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/set" ) // UpdateDMMeta is used to maintain the DM meta information type UpdateDMMeta struct { cluster string metadata *dmspec.Metadata deletedNodesID []string } // NewUpdateDMMeta create i update dm meta task. 
func NewUpdateDMMeta(cluster string, metadata *dmspec.Metadata, deletedNodesID []string) *UpdateDMMeta { return &UpdateDMMeta{ cluster: cluster, metadata: metadata, deletedNodesID: deletedNodesID, } } // Execute implements the Task interface // the metadata especially the topology is in wide use, // the other callers point to this field by a pointer, // so we should update the original topology directly, and don't make a copy func (u *UpdateDMMeta) Execute(ctx context.Context) error { deleted := set.NewStringSet(u.deletedNodesID...) topo := u.metadata.Topology masters := make([]*dmspec.MasterSpec, 0) for i, instance := range (&dmspec.DMMasterComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } masters = append(masters, topo.Masters[i]) } topo.Masters = masters workers := make([]*dmspec.WorkerSpec, 0) for i, instance := range (&dmspec.DMWorkerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } workers = append(workers, topo.Workers[i]) } topo.Workers = workers monitors := make([]*spec.PrometheusSpec, 0) for i, instance := range (&spec.MonitorComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } monitors = append(monitors, topo.Monitors[i]) } topo.Monitors = monitors grafanas := make([]*spec.GrafanaSpec, 0) for i, instance := range (&spec.GrafanaComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } grafanas = append(grafanas, topo.Grafanas[i]) } topo.Grafanas = grafanas alertmanagers := make([]*spec.AlertmanagerSpec, 0) for i, instance := range (&spec.AlertManagerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } alertmanagers = append(alertmanagers, topo.Alertmanagers[i]) } topo.Alertmanagers = alertmanagers return dmspec.GetSpecManager().SaveMeta(u.cluster, u.metadata) } // Rollback implements the Task interface func (u *UpdateDMMeta) Rollback(ctx context.Context) error { return 
dmspec.GetSpecManager().SaveMeta(u.cluster, u.metadata) } // String implements the fmt.Stringer interface func (u *UpdateDMMeta) String() string { return fmt.Sprintf("UpdateMeta: cluster=%s, deleted=`'%s'`", u.cluster, strings.Join(u.deletedNodesID, "','")) } tiup-1.16.3/components/playground/000077500000000000000000000000001505422223000171275ustar00rootroot00000000000000tiup-1.16.3/components/playground/command.go000066400000000000000000000220631505422223000210770ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "bytes" "encoding/json" "fmt" "io" "net/http" "os" "strconv" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/playground/instance" "github.com/spf13/cobra" ) // CommandType send to playground. type CommandType string // types of CommandType const ( ScaleInCommandType CommandType = "scale-in" ScaleOutCommandType CommandType = "scale-out" DisplayCommandType CommandType = "display" ) // Command send to Playground. 
type Command struct { CommandType CommandType PID int // Set when scale-in ComponentID string instance.Config } func buildCommands(tp CommandType, opt *BootOptions) (cmds []Command) { commands := []struct { comp string instance.Config }{ {"pd", opt.PD}, {"tso", opt.TSO}, {"scheduling", opt.Scheduling}, {"tikv", opt.TiKV}, {"pump", opt.Pump}, {"tiflash", opt.TiFlash}, {"tiproxy", opt.TiProxy}, {"tidb", opt.TiDB}, {"ticdc", opt.TiCDC}, {"tikv-cdc", opt.TiKVCDC}, {"drainer", opt.Drainer}, {"dm-master", opt.DMMaster}, {"dm-worker", opt.DMWorker}, } for _, cmd := range commands { for i := 0; i < cmd.Num; i++ { c := Command{ CommandType: tp, ComponentID: cmd.comp, Config: cmd.Config, } cmds = append(cmds, c) } } return } func newScaleOut() *cobra.Command { var opt BootOptions cmd := &cobra.Command{ Use: "scale-out instances", Example: "tiup playground scale-out --db 1", RunE: func(cmd *cobra.Command, args []string) error { num, err := scaleOut(args, &opt) if err != nil { return err } if num == 0 { return cmd.Help() } return nil }, Hidden: false, } cmd.Flags().IntVarP(&opt.TiDB.Num, "db", "", opt.TiDB.Num, "TiDB instance number") cmd.Flags().IntVarP(&opt.TiKV.Num, "kv", "", opt.TiKV.Num, "TiKV instance number") cmd.Flags().IntVarP(&opt.PD.Num, "pd", "", opt.PD.Num, "PD instance number") cmd.Flags().IntVarP(&opt.TSO.Num, "tso", "", opt.TSO.Num, "TSO instance number") cmd.Flags().IntVarP(&opt.Scheduling.Num, "scheduling", "", opt.Scheduling.Num, "Scheduling instance number") cmd.Flags().IntVarP(&opt.TiFlash.Num, "tiflash", "", opt.TiFlash.Num, "TiFlash instance number") cmd.Flags().IntVarP(&opt.TiProxy.Num, "tiproxy", "", opt.TiProxy.Num, "TiProxy instance number") cmd.Flags().IntVarP(&opt.TiCDC.Num, "ticdc", "", opt.TiCDC.Num, "TiCDC instance number") cmd.Flags().IntVarP(&opt.TiKVCDC.Num, "kvcdc", "", opt.TiKVCDC.Num, "TiKV-CDC instance number") cmd.Flags().IntVarP(&opt.Pump.Num, "pump", "", opt.Pump.Num, "Pump instance number") cmd.Flags().IntVarP(&opt.Drainer.Num, 
"drainer", "", opt.Pump.Num, "Drainer instance number") cmd.Flags().StringVarP(&opt.TiDB.Host, "db.host", "", opt.TiDB.Host, "Playground TiDB host. If not provided, TiDB will still use `host` flag as its host") cmd.Flags().StringVarP(&opt.PD.Host, "pd.host", "", opt.PD.Host, "Playground PD host. If not provided, PD will still use `host` flag as its host") cmd.Flags().StringVarP(&opt.TSO.Host, "tso.host", "", opt.TSO.Host, "Playground TSO host. If not provided, TSO will still use `host` flag as its host") cmd.Flags().StringVarP(&opt.Scheduling.Host, "scheduling.host", "", opt.Scheduling.Host, "Playground Scheduling host. If not provided, Scheduling will still use `host` flag as its host") cmd.Flags().StringVarP(&opt.TiProxy.Host, "tiproxy.host", "", opt.PD.Host, "Playground TiProxy host. If not provided, TiProxy will still use `host` flag as its host") cmd.Flags().IntVarP(&opt.DMMaster.Num, "dm-master", "", opt.DMMaster.Num, "DM-master instance number") cmd.Flags().IntVarP(&opt.DMWorker.Num, "dm-worker", "", opt.DMWorker.Num, "DM-worker instance number") cmd.Flags().StringVarP(&opt.TiDB.ConfigPath, "db.config", "", opt.TiDB.ConfigPath, "TiDB instance configuration file") cmd.Flags().StringVarP(&opt.TiKV.ConfigPath, "kv.config", "", opt.TiKV.ConfigPath, "TiKV instance configuration file") cmd.Flags().StringVarP(&opt.PD.ConfigPath, "pd.config", "", opt.PD.ConfigPath, "PD instance configuration file") cmd.Flags().StringVarP(&opt.TSO.ConfigPath, "tso.config", "", opt.TSO.ConfigPath, "TSO instance configuration file") cmd.Flags().StringVarP(&opt.Scheduling.ConfigPath, "scheduling.config", "", opt.Scheduling.ConfigPath, "Scheduling instance configuration file") cmd.Flags().StringVarP(&opt.TiFlash.ConfigPath, "tiflash.config", "", opt.TiFlash.ConfigPath, "TiFlash instance configuration file") cmd.Flags().StringVarP(&opt.TiProxy.ConfigPath, "tiproxy.config", "", opt.TiProxy.ConfigPath, "TiProxy instance configuration file") cmd.Flags().StringVarP(&opt.Pump.ConfigPath, 
"pump.config", "", opt.Pump.ConfigPath, "Pump instance configuration file") cmd.Flags().StringVarP(&opt.Drainer.ConfigPath, "drainer.config", "", opt.Drainer.ConfigPath, "Drainer instance configuration file") cmd.Flags().StringVarP(&opt.DMMaster.ConfigPath, "dm-master.config", "", opt.DMMaster.ConfigPath, "DM-master instance configuration file") cmd.Flags().StringVarP(&opt.DMWorker.ConfigPath, "dm-worker.config", "", opt.DMWorker.ConfigPath, "DM-worker instance configuration file") cmd.Flags().StringVarP(&opt.TiDB.BinPath, "db.binpath", "", opt.TiDB.BinPath, "TiDB instance binary path") cmd.Flags().StringVarP(&opt.TiKV.BinPath, "kv.binpath", "", opt.TiKV.BinPath, "TiKV instance binary path") cmd.Flags().StringVarP(&opt.PD.BinPath, "pd.binpath", "", opt.PD.BinPath, "PD instance binary path") cmd.Flags().StringVarP(&opt.TSO.BinPath, "tso.binpath", "", opt.TSO.BinPath, "TSO instance binary path") cmd.Flags().StringVarP(&opt.Scheduling.BinPath, "scheduling.binpath", "", opt.Scheduling.BinPath, "Scheduling instance binary path") cmd.Flags().StringVarP(&opt.TiFlash.BinPath, "tiflash.binpath", "", opt.TiFlash.BinPath, "TiFlash instance binary path") cmd.Flags().StringVarP(&opt.TiProxy.BinPath, "tiproxy.binpath", "", opt.TiProxy.BinPath, "TiProxy instance binary path") cmd.Flags().StringVarP(&opt.TiCDC.BinPath, "ticdc.binpath", "", opt.TiCDC.BinPath, "TiCDC instance binary path") cmd.Flags().StringVarP(&opt.TiKVCDC.BinPath, "kvcdc.binpath", "", opt.TiKVCDC.BinPath, "TiKVCDC instance binary path") cmd.Flags().StringVarP(&opt.Pump.BinPath, "pump.binpath", "", opt.Pump.BinPath, "Pump instance binary path") cmd.Flags().StringVarP(&opt.Drainer.BinPath, "drainer.binpath", "", opt.Drainer.BinPath, "Drainer instance binary path") cmd.Flags().StringVarP(&opt.DMMaster.BinPath, "dm-master.binpath", "", opt.DMMaster.BinPath, "DM-master instance binary path") cmd.Flags().StringVarP(&opt.DMWorker.BinPath, "dm-worker.binpath", "", opt.DMWorker.BinPath, "DM-worker instance binary path") 
return cmd } func newScaleIn() *cobra.Command { var pids []int cmd := &cobra.Command{ Use: "scale-in a instance with specified pid", Example: "tiup playground scale-in --pid 234 # You can get pid by `tiup playground display`", RunE: func(cmd *cobra.Command, args []string) error { if len(pids) == 0 { return cmd.Help() } return scaleIn(pids) }, Hidden: false, } cmd.Flags().IntSliceVar(&pids, "pid", nil, "pid of instance to be scale in") return cmd } func newDisplay() *cobra.Command { cmd := &cobra.Command{ Use: "display the instances.", Hidden: false, RunE: func(cmd *cobra.Command, args []string) error { return display(args) }, } return cmd } func scaleIn(pids []int) error { port, err := targetTag() if err != nil { return err } var cmds []Command for _, pid := range pids { c := Command{ CommandType: ScaleInCommandType, PID: pid, } cmds = append(cmds, c) } addr := "127.0.0.1:" + strconv.Itoa(port) return sendCommandsAndPrintResult(cmds, addr) } func scaleOut(args []string, opt *BootOptions) (num int, err error) { port, err := targetTag() if err != nil { return 0, err } cmds := buildCommands(ScaleOutCommandType, opt) if len(cmds) == 0 { return 0, nil } addr := "127.0.0.1:" + strconv.Itoa(port) return len(cmds), sendCommandsAndPrintResult(cmds, addr) } func display(args []string) error { port, err := targetTag() if err != nil { return err } c := Command{ CommandType: DisplayCommandType, } addr := "127.0.0.1:" + strconv.Itoa(port) return sendCommandsAndPrintResult([]Command{c}, addr) } func sendCommandsAndPrintResult(cmds []Command, addr string) error { for _, cmd := range cmds { data, err := json.Marshal(&cmd) if err != nil { return errors.AddStack(err) } url := fmt.Sprintf("http://%s/command", addr) resp, err := http.Post(url, "application/json", bytes.NewReader(data)) if err != nil { return errors.AddStack(err) } defer resp.Body.Close() _, err = io.Copy(os.Stdout, resp.Body) if err != nil { return errors.AddStack(err) } } return nil } 
tiup-1.16.3/components/playground/env.go000066400000000000000000000026211505422223000202470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "os" "path/filepath" "github.com/pingcap/errors" ) // targetTag find the target playground we want to send the command. // first try the tag of current instance, then find the first playground. // so, if running multi playground, you must specify the tag to send the command to. // note the flowing two instance will use two different tag. // 1. tiup playground // 2. tiup playground display func targetTag() (port int, err error) { port, err = loadPort(dataDir) if err == nil { return port, nil } err = nil _ = filepath.Walk(filepath.Dir(dataDir), func(path string, info os.FileInfo, err error) error { if port != 0 { return filepath.SkipDir } // ignore error if err != nil { return nil } if !info.IsDir() { return nil } port, _ = loadPort(path) return nil }) if port == 0 { return 0, errors.Errorf("no playground running") } return } tiup-1.16.3/components/playground/grafana.go000066400000000000000000000113731505422223000210620ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "os" "os/exec" "path" "path/filepath" "regexp" "strings" "sync" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/playground/instance" tiupexec "github.com/pingcap/tiup/pkg/exec" "github.com/pingcap/tiup/pkg/utils" ) type grafana struct { host string port int version string waitErr error waitOnce sync.Once cmd *exec.Cmd } func newGrafana(version string, host string, port int) *grafana { return &grafana{ host: host, version: version, port: port, } } // ref: https://grafana.com/docs/grafana/latest/administration/provisioning/ func writeDatasourceConfig(fname string, clusterName string, p8sURL string) error { err := makeSureDir(fname) if err != nil { return err } tpl := `apiVersion: 1 datasources: - name: %s type: prometheus access: proxy url: %s withCredentials: false isDefault: false tlsAuth: false tlsAuthWithCACert: false version: 1 editable: true ` s := fmt.Sprintf(tpl, clusterName, p8sURL) err = utils.WriteFile(fname, []byte(s), 0644) if err != nil { return errors.AddStack(err) } return nil } // ref: templates/scripts/run_grafana.sh.tpl // replace the data source in json to the one we are using. 
func replaceDatasource(dashboardDir string, datasourceName string) error { // for "s/\${DS_.*-CLUSTER}/datasourceName/g re := regexp.MustCompile(`\${DS_.*-CLUSTER}`) err := filepath.Walk(dashboardDir, func(path string, info os.FileInfo, err error) error { if err != nil { fmt.Printf("skip scan %s failed: %v", path, err) return nil } if info.IsDir() { return nil } data, err := os.ReadFile(path) if err != nil { return errors.AddStack(err) } s := string(data) s = strings.ReplaceAll(s, "test-cluster", datasourceName) s = strings.ReplaceAll(s, "Test-Cluster", datasourceName) s = strings.ReplaceAll(s, "${DS_LIGHTNING}", datasourceName) s = re.ReplaceAllLiteralString(s, datasourceName) return utils.WriteFile(path, []byte(s), 0644) }) if err != nil { return err } return nil } func writeDashboardConfig(fname string, clusterName string, dir string) error { err := makeSureDir(fname) if err != nil { return err } tpl := `apiVersion: 1 providers: - name: %s folder: %s type: file disableDeletion: false allowUiUpdates: true editable: true updateIntervalSeconds: 30 options: path: %s ` s := fmt.Sprintf(tpl, clusterName, clusterName, dir) err = utils.WriteFile(fname, []byte(s), 0644) if err != nil { return errors.AddStack(err) } return nil } func makeSureDir(fname string) error { return utils.MkdirAll(filepath.Dir(fname), 0755) } var clusterName = "Test-Cluster" // dir should contains files untar the grafana. // return not error iff the Cmd is started successfully. 
func (g *grafana) start(ctx context.Context, dir string, portOffset int, p8sURL string) (err error) { g.port = utils.MustGetFreePort(g.host, g.port, portOffset) fname := filepath.Join(dir, "conf", "provisioning", "dashboards", "dashboard.yml") err = writeDashboardConfig(fname, clusterName, filepath.Join(dir, "dashboards")) if err != nil { return err } fname = filepath.Join(dir, "conf", "provisioning", "datasources", "datasource.yml") err = writeDatasourceConfig(fname, clusterName, p8sURL) if err != nil { return err } tpl := ` [server] # The ip address to bind to, empty will bind to all interfaces http_addr = %s # The http port to use http_port = %d ` err = utils.MkdirAll(filepath.Join(dir, "conf"), 0755) if err != nil { return errors.AddStack(err) } custome := fmt.Sprintf(tpl, g.host, g.port) customeFName := filepath.Join(dir, "conf", "custom.ini") err = utils.WriteFile(customeFName, []byte(custome), 0644) if err != nil { return errors.AddStack(err) } args := []string{ "--homepath", dir, "--config", customeFName, fmt.Sprintf("cfg:default.paths.logs=%s", path.Join(dir, "log")), } var binPath string if binPath, err = tiupexec.PrepareBinary("grafana", utils.Version(g.version), binPath); err != nil { return err } cmd := instance.PrepareCommand(ctx, binPath, args, nil, dir) g.cmd = cmd return g.cmd.Start() } func (g *grafana) wait() error { g.waitOnce.Do(func() { g.waitErr = g.cmd.Wait() }) return g.waitErr } tiup-1.16.3/components/playground/grafana_test.go000066400000000000000000000022651505422223000221210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "os" "path/filepath" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestReplaceDatasource(t *testing.T) { origin := ` a: ${DS_1-CLUSTER} b: test-cluster c: Test-Cluster d: ${DS_LIGHTNING} ` dir, err := os.MkdirTemp("", "play_replace_test_*") assert.Nil(t, err) defer os.RemoveAll(dir) fname := filepath.Join(dir, "a.json") err = os.WriteFile(fname, []byte(origin), 0644) assert.Nil(t, err) name := "myname" err = replaceDatasource(dir, name) assert.Nil(t, err) data, err := os.ReadFile(fname) assert.Nil(t, err) replaced := string(data) n := strings.Count(replaced, name) assert.Equal(t, 4, n, "replaced: %s", replaced) } tiup-1.16.3/components/playground/instance/000077500000000000000000000000001505422223000207335ustar00rootroot00000000000000tiup-1.16.3/components/playground/instance/dm_master.go000066400000000000000000000050041505422223000232340ustar00rootroot00000000000000package instance import ( "context" "fmt" "path/filepath" "strings" "github.com/pingcap/tiup/pkg/utils" ) // DMMaster represent a DM master instance. type DMMaster struct { instance Process initEndpoints []*DMMaster } var _ Instance = &DMMaster{} // NewDMMaster create a new DMMaster instance. func NewDMMaster(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, port int) *DMMaster { if port <= 0 { port = 8261 } return &DMMaster{ instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, 8291, shOpt.PortOffset), // Similar like PD's client port, here use StatusPort for Master Port. 
StatusPort: utils.MustGetFreePort(host, port, shOpt.PortOffset), ConfigPath: configPath, }, } } // Name return the name of the instance. func (m *DMMaster) Name() string { return fmt.Sprintf("dm-master-%d", m.ID) } // Start starts the instance. func (m *DMMaster) Start(ctx context.Context) error { args := []string{ fmt.Sprintf("--name=%s", m.Name()), fmt.Sprintf("--master-addr=http://%s", utils.JoinHostPort(m.Host, m.StatusPort)), fmt.Sprintf("--advertise-addr=http://%s", utils.JoinHostPort(AdvertiseHost(m.Host), m.StatusPort)), fmt.Sprintf("--peer-urls=http://%s", utils.JoinHostPort(m.Host, m.Port)), fmt.Sprintf("--advertise-peer-urls=http://%s", utils.JoinHostPort(AdvertiseHost(m.Host), m.Port)), fmt.Sprintf("--log-file=%s", m.LogFile()), } endpoints := make([]string, 0) for _, master := range m.initEndpoints { endpoints = append(endpoints, fmt.Sprintf("%s=http://%s", master.Name(), utils.JoinHostPort(master.Host, master.Port))) } args = append(args, fmt.Sprintf("--initial-cluster=%s", strings.Join(endpoints, ","))) if m.ConfigPath != "" { args = append(args, fmt.Sprintf("--config=%s", m.ConfigPath)) } m.Process = &process{cmd: PrepareCommand(ctx, m.BinPath, args, nil, m.Dir)} logIfErr(m.Process.SetOutputFile(m.LogFile())) return m.Process.Start() } // SetInitEndpoints set the initial endpoints for the DM master. func (m *DMMaster) SetInitEndpoints(endpoints []*DMMaster) { m.initEndpoints = endpoints } // Component return the component of the instance. func (m *DMMaster) Component() string { return "dm-master" } // LogFile return the log file path of the instance. func (m *DMMaster) LogFile() string { return filepath.Join(m.Dir, "dm-master.log") } // Addr return the address of the instance. 
func (m *DMMaster) Addr() string { return utils.JoinHostPort(m.Host, m.StatusPort) } tiup-1.16.3/components/playground/instance/dm_worker.go000066400000000000000000000037241505422223000232610ustar00rootroot00000000000000package instance import ( "context" "fmt" "path/filepath" "strings" "github.com/pingcap/tiup/pkg/utils" ) // DMWorker represent a DM worker instance. type DMWorker struct { instance Process masters []*DMMaster } var _ Instance = &DMWorker{} // NewDMWorker create a DMWorker instance. func NewDMWorker(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, port int, masters []*DMMaster) *DMWorker { if port <= 0 { port = 8262 } return &DMWorker{ instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, port, shOpt.PortOffset), ConfigPath: configPath, }, masters: masters, } } // MasterAddrs return the master addresses. func (w *DMWorker) MasterAddrs() []string { var addrs []string for _, master := range w.masters { addrs = append(addrs, utils.JoinHostPort(AdvertiseHost(master.Host), master.StatusPort)) } return addrs } // Name return the name of the instance. func (w *DMWorker) Name() string { return fmt.Sprintf("dm-worker-%d", w.ID) } // Start starts the instance. func (w *DMWorker) Start(ctx context.Context) error { args := []string{ fmt.Sprintf("--name=%s", w.Name()), fmt.Sprintf("--worker-addr=%s", utils.JoinHostPort(w.Host, w.Port)), fmt.Sprintf("--advertise-addr=%s", utils.JoinHostPort(AdvertiseHost(w.Host), w.Port)), fmt.Sprintf("--join=%s", strings.Join(w.MasterAddrs(), ",")), fmt.Sprintf("--log-file=%s", w.LogFile()), } if w.ConfigPath != "" { args = append(args, fmt.Sprintf("--config=%s", w.ConfigPath)) } w.Process = &process{cmd: PrepareCommand(ctx, w.BinPath, args, nil, w.Dir)} logIfErr(w.Process.SetOutputFile(w.LogFile())) return w.Process.Start() } // Component return the component of the instance. 
func (w *DMWorker) Component() string { return "dm-worker" } // LogFile return the log file of the instance. func (w *DMWorker) LogFile() string { return filepath.Join(w.Dir, "dm-worker.log") } tiup-1.16.3/components/playground/instance/drainer.go000066400000000000000000000045041505422223000227110ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "path/filepath" "strings" "github.com/pingcap/tiup/pkg/utils" ) // Drainer represent a drainer instance. type Drainer struct { instance pds []*PDInstance Process } var _ Instance = &Drainer{} // NewDrainer create a Drainer instance. func NewDrainer(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, pds []*PDInstance) *Drainer { d := &Drainer{ instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, 8250, shOpt.PortOffset), ConfigPath: configPath, }, pds: pds, } d.StatusPort = d.Port return d } // Component return component name. func (d *Drainer) Component() string { return "drainer" } // LogFile return the log file name. func (d *Drainer) LogFile() string { return filepath.Join(d.Dir, "drainer.log") } // Addr return the address of Drainer. func (d *Drainer) Addr() string { return utils.JoinHostPort(AdvertiseHost(d.Host), d.Port) } // NodeID return the node id of drainer. func (d *Drainer) NodeID() string { return fmt.Sprintf("drainer_%d", d.ID) } // Start implements Instance interface. 
func (d *Drainer) Start(ctx context.Context) error { endpoints := pdEndpoints(d.pds, true) args := []string{ fmt.Sprintf("--node-id=%s", d.NodeID()), fmt.Sprintf("--addr=%s", utils.JoinHostPort(d.Host, d.Port)), fmt.Sprintf("--advertise-addr=%s", utils.JoinHostPort(AdvertiseHost(d.Host), d.Port)), fmt.Sprintf("--pd-urls=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--log-file=%s", d.LogFile()), } if d.ConfigPath != "" { args = append(args, fmt.Sprintf("--config=%s", d.ConfigPath)) } d.Process = &process{cmd: PrepareCommand(ctx, d.BinPath, args, nil, d.Dir)} logIfErr(d.Process.SetOutputFile(d.LogFile())) return d.Process.Start() } tiup-1.16.3/components/playground/instance/instance.go000066400000000000000000000136641505422223000231000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "net" "os" "path/filepath" "github.com/BurntSushi/toml" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" tiupexec "github.com/pingcap/tiup/pkg/exec" "github.com/pingcap/tiup/pkg/tui/colorstr" "github.com/pingcap/tiup/pkg/utils" ) // Config of the instance. type Config struct { ConfigPath string `yaml:"config_path"` BinPath string `yaml:"bin_path"` Num int `yaml:"num"` Host string `yaml:"host"` Port int `yaml:"port"` UpTimeout int `yaml:"up_timeout"` Version string `yaml:"version"` } // SharedOptions contains some commonly used, tunable options for most components. // Unlike Config, these options are shared for all instances of all components. 
type SharedOptions struct { /// Whether or not to tune the cluster in order to run faster (instead of easier to debug). HighPerf bool `yaml:"high_perf"` CSE CSEOptions `yaml:"cse"` // Only available when mode == tidb-cse or tiflash-disagg PDMode string `yaml:"pd_mode"` Mode string `yaml:"mode"` PortOffset int `yaml:"port_offset"` EnableTiKVColumnar bool `yaml:"enable_tikv_columnar"` // Only available when mode == tidb-cse } // CSEOptions contains configs to run TiDB cluster in CSE mode. type CSEOptions struct { S3Endpoint string `yaml:"s3_endpoint"` Bucket string `yaml:"bucket"` AccessKey string `yaml:"access_key"` SecretKey string `yaml:"secret_key"` } type instance struct { ID int Dir string Host string Port int StatusPort int // client port for PD ConfigPath string BinPath string Version utils.Version } // MetricAddr will be used by prometheus scrape_configs. type MetricAddr struct { Targets []string `json:"targets"` Labels map[string]string `json:"labels"` } // Instance represent running component type Instance interface { Pid() int // Start the instance process. // Will kill the process once the context is done. Start(ctx context.Context) error // Component Return the component name. Component() string // LogFile return the log file name LogFile() string // Uptime show uptime. Uptime() string // MetricAddr return the address to pull metrics. MetricAddr() MetricAddr // Wait Should only call this if the instance is started successfully. // The implementation should be safe to call Wait multi times. Wait() error // PrepareBinary use given binpath or download from tiup mirrors. 
PrepareBinary(binaryName string, componentName string, version utils.Version) error } func (inst *instance) MetricAddr() (r MetricAddr) { if inst.Host != "" && inst.StatusPort != 0 { r.Targets = append(r.Targets, utils.JoinHostPort(inst.Host, inst.StatusPort)) } return } func (inst *instance) PrepareBinary(binaryName string, componentName string, version utils.Version) error { instanceBinPath, err := tiupexec.PrepareBinary(binaryName, version, inst.BinPath) if err != nil { return err } // distinguish whether the instance is started by specific binary path. if inst.BinPath == "" { colorstr.Printf("[dark_gray]Start %s instance: %s[reset]\n", componentName, version) } else { colorstr.Printf("[dark_gray]Start %s instance: %s[reset]\n", componentName, instanceBinPath) } inst.Version = version inst.BinPath = instanceBinPath return nil } // CompVersion return the format to run specified version of a component. func CompVersion(comp string, version utils.Version) string { if version.IsEmpty() { return comp } return fmt.Sprintf("%v:%v", comp, version) } // AdvertiseHost returns the interface's ip addr if listen host is 0.0.0.0 func AdvertiseHost(listen string) string { if listen == "0.0.0.0" { addrs, err := net.InterfaceAddrs() if err != nil || len(addrs) == 0 { return "localhost" } for _, addr := range addrs { if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && ip.IP.To4() != nil { return ip.IP.To4().String() } } return "localhost" } return listen } func logIfErr(err error) { if err != nil { fmt.Println(err) } } func pdEndpoints(pds []*PDInstance, isHTTP bool) []string { var endpoints []string for _, pd := range pds { if pd.role == PDRoleTSO || pd.role == PDRoleScheduling { continue } if isHTTP { endpoints = append(endpoints, "http://"+utils.JoinHostPort(AdvertiseHost(pd.Host), pd.StatusPort)) } else { endpoints = append(endpoints, utils.JoinHostPort(AdvertiseHost(pd.Host), pd.StatusPort)) } } return endpoints } // prepareConfig accepts a user specified config and 
merge user config with a // pre-defined one. func prepareConfig(outputConfigPath string, userConfigPath string, preDefinedConfig map[string]any) error { dir := filepath.Dir(outputConfigPath) if err := utils.MkdirAll(dir, 0755); err != nil { return err } userConfig, err := unmarshalConfig(userConfigPath) if err != nil { return errors.Trace(err) } if userConfig == nil { userConfig = make(map[string]any) } cf, err := os.Create(outputConfigPath) if err != nil { return errors.Trace(err) } enc := toml.NewEncoder(cf) enc.Indent = "" return enc.Encode(spec.MergeConfig(preDefinedConfig, userConfig)) } func unmarshalConfig(path string) (map[string]any, error) { if path == "" { return nil, nil } data, err := os.ReadFile(path) if err != nil { return nil, err } c := make(map[string]any) err = toml.Unmarshal(data, &c) if err != nil { return nil, err } return c, nil } tiup-1.16.3/components/playground/instance/pd.go000066400000000000000000000142701505422223000216710ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "path/filepath" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) // PDRole is the role of PD. 
type PDRole string const ( // PDRoleNormal is the default role of PD PDRoleNormal PDRole = "pd" // PDRoleAPI is the role of PD API PDRoleAPI PDRole = "api" // PDRoleTSO is the role of PD TSO PDRoleTSO PDRole = "tso" // PDRoleScheduling is the role of PD scheduling PDRoleScheduling PDRole = "scheduling" ) // PDInstance represent a running pd-server type PDInstance struct { instance shOpt SharedOptions role PDRole initEndpoints []*PDInstance joinEndpoints []*PDInstance pds []*PDInstance Process kvIsSingleReplica bool } // NewPDInstance return a PDInstance func NewPDInstance(role PDRole, shOpt SharedOptions, binPath, dir, host, configPath string, id int, pds []*PDInstance, port int, kvIsSingleReplica bool) *PDInstance { if port <= 0 { port = 2379 } return &PDInstance{ shOpt: shOpt, instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, 2380, shOpt.PortOffset), StatusPort: utils.MustGetFreePort(host, port, shOpt.PortOffset), ConfigPath: configPath, }, role: role, pds: pds, kvIsSingleReplica: kvIsSingleReplica, } } // Join set endpoints field of PDInstance func (inst *PDInstance) Join(pds []*PDInstance) *PDInstance { inst.joinEndpoints = pds return inst } // InitCluster set the init cluster instance. func (inst *PDInstance) InitCluster(pds []*PDInstance) *PDInstance { inst.initEndpoints = pds return inst } // Name return the name of pd. 
func (inst *PDInstance) Name() string { switch inst.role { case PDRoleTSO: return fmt.Sprintf("tso-%d", inst.ID) case PDRoleScheduling: return fmt.Sprintf("scheduling-%d", inst.ID) default: return fmt.Sprintf("pd-%d", inst.ID) } } // Start calls set inst.cmd and Start func (inst *PDInstance) Start(ctx context.Context) error { configPath := filepath.Join(inst.Dir, "pd.toml") if err := prepareConfig( configPath, inst.ConfigPath, inst.getConfig(), ); err != nil { return err } uid := inst.Name() var args []string switch inst.role { case PDRoleNormal, PDRoleAPI: if inst.role == PDRoleAPI { args = []string{"services", "api"} } args = append(args, []string{ "--name=" + uid, fmt.Sprintf("--config=%s", configPath), fmt.Sprintf("--data-dir=%s", filepath.Join(inst.Dir, "data")), fmt.Sprintf("--peer-urls=http://%s", utils.JoinHostPort(inst.Host, inst.Port)), fmt.Sprintf("--advertise-peer-urls=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.Port)), fmt.Sprintf("--client-urls=http://%s", utils.JoinHostPort(inst.Host, inst.StatusPort)), fmt.Sprintf("--advertise-client-urls=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.StatusPort)), fmt.Sprintf("--log-file=%s", inst.LogFile()), }...) 
switch { case len(inst.initEndpoints) > 0: endpoints := make([]string, 0) for _, pd := range inst.initEndpoints { uid := fmt.Sprintf("pd-%d", pd.ID) endpoints = append(endpoints, fmt.Sprintf("%s=http://%s", uid, utils.JoinHostPort(AdvertiseHost(inst.Host), pd.Port))) } args = append(args, fmt.Sprintf("--initial-cluster=%s", strings.Join(endpoints, ","))) case len(inst.joinEndpoints) > 0: endpoints := make([]string, 0) for _, pd := range inst.joinEndpoints { endpoints = append(endpoints, fmt.Sprintf("http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), pd.Port))) } args = append(args, fmt.Sprintf("--join=%s", strings.Join(endpoints, ","))) default: return errors.Errorf("must set the init or join instances") } case PDRoleTSO: endpoints := pdEndpoints(inst.pds, true) args = []string{ "services", "tso", fmt.Sprintf("--listen-addr=http://%s", utils.JoinHostPort(inst.Host, inst.StatusPort)), fmt.Sprintf("--advertise-listen-addr=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.StatusPort)), fmt.Sprintf("--backend-endpoints=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--log-file=%s", inst.LogFile()), fmt.Sprintf("--config=%s", configPath), } if tidbver.PDSupportMicroservicesWithName(inst.Version.String()) { args = append(args, fmt.Sprintf("--name=%s", uid)) } case PDRoleScheduling: endpoints := pdEndpoints(inst.pds, true) args = []string{ "services", "scheduling", fmt.Sprintf("--listen-addr=http://%s", utils.JoinHostPort(inst.Host, inst.StatusPort)), fmt.Sprintf("--advertise-listen-addr=http://%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.StatusPort)), fmt.Sprintf("--backend-endpoints=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--log-file=%s", inst.LogFile()), fmt.Sprintf("--config=%s", configPath), } if tidbver.PDSupportMicroservicesWithName(inst.Version.String()) { args = append(args, fmt.Sprintf("--name=%s", uid)) } } inst.Process = &process{cmd: PrepareCommand(ctx, inst.BinPath, args, nil, inst.Dir)} 
logIfErr(inst.Process.SetOutputFile(inst.LogFile())) return inst.Process.Start() } // Component return the component name. func (inst *PDInstance) Component() string { if inst.role == PDRoleNormal || inst.role == PDRoleAPI { return "pd" } return string(inst.role) } // LogFile return the log file. func (inst *PDInstance) LogFile() string { if inst.role == PDRoleNormal || inst.role == PDRoleAPI { return filepath.Join(inst.Dir, "pd.log") } return filepath.Join(inst.Dir, fmt.Sprintf("%s.log", string(inst.role))) } // Addr return the listen address of PD func (inst *PDInstance) Addr() string { return utils.JoinHostPort(AdvertiseHost(inst.Host), inst.StatusPort) } tiup-1.16.3/components/playground/instance/pd_config.go000066400000000000000000000020331505422223000232100ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance func (inst *PDInstance) getConfig() map[string]any { config := make(map[string]any) config["schedule.patrol-region-interval"] = "100ms" config["schedule.low-space-ratio"] = 1.0 if inst.kvIsSingleReplica { config["replication.max-replicas"] = 1 } if inst.shOpt.Mode == "tidb-cse" { config["keyspace.pre-alloc"] = []string{"mykeyspace"} config["replication.enable-placement-rules"] = true config["schedule.merge-schedule-limit"] = 0 config["schedule.replica-schedule-limit"] = 500 } return config } tiup-1.16.3/components/playground/instance/proc_attr_default.go000066400000000000000000000012471505422223000247670ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. // //go:build !linux // +build !linux package instance import "syscall" // SysProcAttr to be use for every Process we start. var SysProcAttr *syscall.SysProcAttr tiup-1.16.3/components/playground/instance/proc_attr_linux.go000066400000000000000000000013311505422223000244740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
// //go:build linux // +build linux package instance import "syscall" // SysProcAttr to be use for every Process we start. var SysProcAttr = &syscall.SysProcAttr{ Pdeathsig: syscall.SIGKILL, Setpgid: true, } tiup-1.16.3/components/playground/instance/process.go000066400000000000000000000037511505422223000227460ustar00rootroot00000000000000package instance import ( "context" "io" "os" "os/exec" "sync" "time" "github.com/pingcap/errors" ) var ( errNotUp = errors.New("not up") ) // Process represent process to be run by playground type Process interface { Start() error Wait() error Pid() int Uptime() string SetOutputFile(fname string) error Cmd() *exec.Cmd } // process implements Process type process struct { cmd *exec.Cmd startTime time.Time waitOnce sync.Once waitErr error } // Start the process func (p *process) Start() error { if p == nil { return errNotUp } // fmt.Printf("Starting `%s`: %s", filepath.Base(p.cmd.Path), strings.Join(p.cmd.Args, " ")) p.startTime = time.Now() return p.cmd.Start() } // Wait implements Instance interface. func (p *process) Wait() error { if p == nil { return errNotUp } p.waitOnce.Do(func() { p.waitErr = p.cmd.Wait() }) return p.waitErr } // Pid implements Instance interface. func (p *process) Pid() int { if p == nil { return 0 } return p.cmd.Process.Pid } // Uptime implements Instance interface. 
func (p *process) Uptime() string { if p == nil { return errNotUp.Error() } s := p.cmd.ProcessState if s != nil { return s.String() } duration := time.Since(p.startTime) return duration.String() } func (p *process) SetOutputFile(fname string) error { if p == nil { return errNotUp } f, err := os.OpenFile(fname, os.O_RDWR|os.O_CREATE, 0666) if err != nil { return errors.AddStack(err) } p.setOutput(f) return nil } func (p *process) setOutput(w io.Writer) { if p == nil { return } p.cmd.Stdout = w p.cmd.Stderr = w } func (p *process) Cmd() *exec.Cmd { if p == nil { panic(errNotUp) } return p.cmd } // PrepareCommand return command for playground func PrepareCommand(ctx context.Context, binPath string, args, envs []string, workDir string) *exec.Cmd { c := exec.CommandContext(ctx, binPath, args...) c.Env = os.Environ() c.Env = append(c.Env, envs...) c.Dir = workDir c.SysProcAttr = SysProcAttr return c } tiup-1.16.3/components/playground/instance/pump.go000066400000000000000000000054111505422223000222440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "net/http" "path/filepath" "strings" "time" "github.com/pingcap/tiup/pkg/utils" ) // Pump represent a pump instance. type Pump struct { instance pds []*PDInstance Process } var _ Instance = &Pump{} // NewPump create a Pump instance. 
func NewPump(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, pds []*PDInstance) *Pump { pump := &Pump{ instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, 8249, shOpt.PortOffset), ConfigPath: configPath, }, pds: pds, } pump.StatusPort = pump.Port return pump } // NodeID return the node id of pump. func (p *Pump) NodeID() string { return fmt.Sprintf("pump_%d", p.ID) } // Ready return nil when pump is ready to serve. func (p *Pump) Ready(ctx context.Context) error { url := fmt.Sprintf("http://%s/status", utils.JoinHostPort(p.Host, p.Port)) ready := func() bool { resp, err := http.Get(url) if err != nil { return false } defer resp.Body.Close() return resp.StatusCode == 200 } for { if ready() { return nil } select { case <-ctx.Done(): return ctx.Err() case <-time.After(time.Second): // just retry } } } // Addr return the address of Pump. func (p *Pump) Addr() string { return utils.JoinHostPort(AdvertiseHost(p.Host), p.Port) } // Start implements Instance interface. func (p *Pump) Start(ctx context.Context) error { endpoints := pdEndpoints(p.pds, true) args := []string{ fmt.Sprintf("--node-id=%s", p.NodeID()), fmt.Sprintf("--addr=%s", utils.JoinHostPort(p.Host, p.Port)), fmt.Sprintf("--advertise-addr=%s", utils.JoinHostPort(AdvertiseHost(p.Host), p.Port)), fmt.Sprintf("--pd-urls=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--log-file=%s", p.LogFile()), } if p.ConfigPath != "" { args = append(args, fmt.Sprintf("--config=%s", p.ConfigPath)) } p.Process = &process{cmd: PrepareCommand(ctx, p.BinPath, args, nil, p.Dir)} logIfErr(p.Process.SetOutputFile(p.LogFile())) return p.Process.Start() } // Component return component name. func (p *Pump) Component() string { return "pump" } // LogFile return the log file. 
func (p *Pump) LogFile() string { return filepath.Join(p.Dir, "pump.log") } tiup-1.16.3/components/playground/instance/ticdc.go000066400000000000000000000046641505422223000223620ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "path/filepath" "strings" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) // TiCDC represent a ticdc instance. type TiCDC struct { instance pds []*PDInstance Process } var _ Instance = &TiCDC{} // NewTiCDC create a TiCDC instance. func NewTiCDC(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, port int, pds []*PDInstance) *TiCDC { if port <= 0 { port = 8300 } ticdc := &TiCDC{ instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, port, shOpt.PortOffset), ConfigPath: configPath, }, pds: pds, } ticdc.StatusPort = ticdc.Port return ticdc } // Start implements Instance interface. 
func (c *TiCDC) Start(ctx context.Context) error { endpoints := pdEndpoints(c.pds, true) args := []string{ "server", fmt.Sprintf("--addr=%s", utils.JoinHostPort(c.Host, c.Port)), fmt.Sprintf("--advertise-addr=%s", utils.JoinHostPort(AdvertiseHost(c.Host), c.Port)), fmt.Sprintf("--pd=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--log-file=%s", c.LogFile()), } clusterVersion := string(c.Version) if tidbver.TiCDCSupportConfigFile(clusterVersion) { if c.ConfigPath != "" { args = append(args, fmt.Sprintf("--config=%s", c.ConfigPath)) } if tidbver.TiCDCSupportDataDir(clusterVersion) { args = append(args, fmt.Sprintf("--data-dir=%s", filepath.Join(c.Dir, "data"))) } else { args = append(args, fmt.Sprintf("--sort-dir=%s/tmp/sorter", filepath.Join(c.Dir, "data"))) } } c.Process = &process{cmd: PrepareCommand(ctx, c.BinPath, args, nil, c.Dir)} logIfErr(c.Process.SetOutputFile(c.LogFile())) return c.Process.Start() } // Component return component name. func (c *TiCDC) Component() string { return "cdc" } // LogFile return the log file. func (c *TiCDC) LogFile() string { return filepath.Join(c.Dir, "ticdc.log") } tiup-1.16.3/components/playground/instance/tidb.go000066400000000000000000000053051505422223000222070ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance import ( "context" "fmt" "path/filepath" "strconv" "strings" "github.com/pingcap/tiup/pkg/utils" ) // TiDBInstance represent a running tidb-server type TiDBInstance struct { instance shOpt SharedOptions pds []*PDInstance Process tiproxyCertDir string enableBinlog bool } // NewTiDBInstance return a TiDBInstance func NewTiDBInstance(shOpt SharedOptions, binPath string, dir, host, configPath string, id, port int, pds []*PDInstance, tiproxyCertDir string, enableBinlog bool) *TiDBInstance { if port <= 0 { port = 4000 } return &TiDBInstance{ shOpt: shOpt, instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, port, shOpt.PortOffset), StatusPort: utils.MustGetFreePort("0.0.0.0", 10080, shOpt.PortOffset), ConfigPath: configPath, }, tiproxyCertDir: tiproxyCertDir, pds: pds, enableBinlog: enableBinlog, } } // Start calls set inst.cmd and Start func (inst *TiDBInstance) Start(ctx context.Context) error { configPath := filepath.Join(inst.Dir, "tidb.toml") if err := prepareConfig( configPath, inst.ConfigPath, inst.getConfig(), ); err != nil { return err } endpoints := pdEndpoints(inst.pds, false) args := []string{ "-P", strconv.Itoa(inst.Port), "--store=tikv", fmt.Sprintf("--host=%s", inst.Host), fmt.Sprintf("--status=%d", inst.StatusPort), fmt.Sprintf("--path=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--log-file=%s", filepath.Join(inst.Dir, "tidb.log")), fmt.Sprintf("--config=%s", configPath), } if inst.enableBinlog { args = append(args, "--enable-binlog=true") } inst.Process = &process{cmd: PrepareCommand(ctx, inst.BinPath, args, nil, inst.Dir)} logIfErr(inst.Process.SetOutputFile(inst.LogFile())) return inst.Process.Start() } // Component return the component name. func (inst *TiDBInstance) Component() string { return "tidb" } // LogFile return the log file name. 
func (inst *TiDBInstance) LogFile() string { return filepath.Join(inst.Dir, "tidb.log") } // Addr return the listen address of TiDB func (inst *TiDBInstance) Addr() string { return utils.JoinHostPort(AdvertiseHost(inst.Host), inst.Port) } tiup-1.16.3/components/playground/instance/tidb_config.go000066400000000000000000000041571505422223000235400ustar00rootroot00000000000000// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "os" "path/filepath" ) func (inst *TiDBInstance) getConfig() map[string]any { config := make(map[string]any) config["security.auto-tls"] = true if inst.shOpt.Mode == "tidb-cse" { config["keyspace-name"] = "mykeyspace" config["enable-safe-point-v2"] = true config["force-enable-vector-type"] = true config["use-autoscaler"] = false config["disaggregated-tiflash"] = true config["ratelimit.full-speed"] = 1048576000 config["ratelimit.full-speed-capacity"] = 1048576000 config["ratelimit.low-speed-watermark"] = uint64(1048576000000) config["ratelimit.block-write-watermark"] = uint64(1048576000000) config["security.enable-sem"] = false config["tiflash-replicas.constraints"] = []any{ map[string]any{ "key": "engine", "op": "in", "values": []string{ "tiflash", }, }, map[string]any{ "key": "engine_role", "op": "in", "values": []string{ "write", }, }, } config["tiflash-replicas.group-id"] = "enable_s3_wn_region" config["tiflash-replicas.extra-s3-rule"] = false config["tiflash-replicas.min-count"] = 1 } else if inst.shOpt.Mode == "tiflash-disagg" { config["use-autoscaler"] = false 
config["disaggregated-tiflash"] = true } tiproxyCrtPath := filepath.Join(inst.tiproxyCertDir, "tiproxy.crt") tiproxyKeyPath := filepath.Join(inst.tiproxyCertDir, "tiproxy.key") _, err1 := os.Stat(tiproxyCrtPath) _, err2 := os.Stat(tiproxyKeyPath) if err1 == nil && err2 == nil { config["security.session-token-signing-cert"] = tiproxyCrtPath config["security.session-token-signing-key"] = tiproxyKeyPath } return config } tiup-1.16.3/components/playground/instance/tiflash.go000066400000000000000000000142101505422223000227120ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) // TiFlashRole is the role of TiFlash. type TiFlashRole string const ( // TiFlashRoleNormal is used when TiFlash is not in disaggregated mode. TiFlashRoleNormal TiFlashRole = "normal" // TiFlashRoleDisaggWrite is used when TiFlash is in disaggregated mode and is the write node. TiFlashRoleDisaggWrite TiFlashRole = "write" // TiFlashRoleDisaggCompute is used when TiFlash is in disaggregated mode and is the compute node. 
TiFlashRoleDisaggCompute TiFlashRole = "compute" ) // TiFlashInstance represent a running TiFlash type TiFlashInstance struct { instance Role TiFlashRole // Used in wait routine, so it is public shOpt SharedOptions tcpPort int servicePort int proxyPort int proxyStatusPort int pds []*PDInstance dbs []*TiDBInstance Process } // NewTiFlashInstance return a TiFlashInstance func NewTiFlashInstance(role TiFlashRole, shOpt SharedOptions, binPath, dir, host, configPath string, id int, pds []*PDInstance, dbs []*TiDBInstance, version string) *TiFlashInstance { if role != TiFlashRoleNormal && role != TiFlashRoleDisaggWrite && role != TiFlashRoleDisaggCompute { panic(fmt.Sprintf("Unknown TiFlash role %s", role)) } if (role == TiFlashRoleDisaggCompute || role == TiFlashRoleDisaggWrite) && shOpt.Mode != "tidb-cse" && shOpt.Mode != "tiflash-disagg" { panic(fmt.Sprintf("Unsupported disagg role in mode %s", shOpt.Mode)) } httpPort := 8123 if !tidbver.TiFlashNotNeedHTTPPortConfig(version) { httpPort = utils.MustGetFreePort(host, httpPort, shOpt.PortOffset) } return &TiFlashInstance{ shOpt: shOpt, instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: httpPort, StatusPort: utils.MustGetFreePort(host, 8234, shOpt.PortOffset), ConfigPath: configPath, }, Role: role, tcpPort: utils.MustGetFreePort(host, 9100, shOpt.PortOffset), // 9000 for default object store port servicePort: utils.MustGetFreePort(host, 3930, shOpt.PortOffset), proxyPort: utils.MustGetFreePort(host, 20170, shOpt.PortOffset), proxyStatusPort: utils.MustGetFreePort(host, 20292, shOpt.PortOffset), pds: pds, dbs: dbs, } } // Addr return the address of tiflash func (inst *TiFlashInstance) Addr() string { return utils.JoinHostPort(AdvertiseHost(inst.Host), inst.servicePort) } // MetricAddr implements Instance interface. 
func (inst *TiFlashInstance) MetricAddr() (r MetricAddr) { r.Targets = append(r.Targets, utils.JoinHostPort(inst.Host, inst.StatusPort)) r.Targets = append(r.Targets, utils.JoinHostPort(inst.Host, inst.proxyStatusPort)) return } // Start calls set inst.cmd and Start func (inst *TiFlashInstance) Start(ctx context.Context) error { if !tidbver.TiFlashPlaygroundNewStartMode(inst.Version.String()) { return inst.startOld(ctx, inst.Version) } proxyConfigPath := filepath.Join(inst.Dir, "tiflash_proxy.toml") if err := prepareConfig( proxyConfigPath, "", inst.getProxyConfig(), ); err != nil { return err } configPath := filepath.Join(inst.Dir, "tiflash.toml") if err := prepareConfig( configPath, inst.ConfigPath, inst.getConfig(), ); err != nil { return err } endpoints := pdEndpoints(inst.pds, false) args := []string{ "server", fmt.Sprintf("--config-file=%s", configPath), "--", } runtimeConfig := [][]string{ {"path", filepath.Join(inst.Dir, "data")}, {"listen_host", inst.Host}, {"logger.log", inst.LogFile()}, {"logger.errorlog", filepath.Join(inst.Dir, "tiflash_error.log")}, {"status.metrics_port", fmt.Sprintf("%d", inst.StatusPort)}, {"flash.service_addr", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.servicePort)}, {"raft.pd_addr", strings.Join(endpoints, ",")}, {"flash.proxy.addr", utils.JoinHostPort(inst.Host, inst.proxyPort)}, {"flash.proxy.advertise-addr", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.proxyPort)}, {"flash.proxy.status-addr", utils.JoinHostPort(inst.Host, inst.proxyStatusPort)}, {"flash.proxy.data-dir", filepath.Join(inst.Dir, "proxy_data")}, {"flash.proxy.log-file", filepath.Join(inst.Dir, "tiflash_tikv.log")}, } userConfig, err := unmarshalConfig(configPath) if err != nil { return errors.Trace(err) } for _, arg := range runtimeConfig { // if user has set the config, skip it if !isKeyPresentInMap(userConfig, arg[0]) { args = append(args, fmt.Sprintf("--%s=%s", arg[0], arg[1])) } } inst.Process = &process{cmd: PrepareCommand(ctx, inst.BinPath, 
args, nil, inst.Dir)} logIfErr(inst.Process.SetOutputFile(inst.LogFile())) return inst.Process.Start() } func isKeyPresentInMap(m map[string]any, key string) bool { keys := strings.Split(key, ".") currentMap := m for i := range keys { if _, ok := currentMap[keys[i]]; !ok { return false } // If the current value is a nested map, update the current map to the nested map if innerMap, ok := currentMap[keys[i]].(map[string]any); ok { currentMap = innerMap } } return true } // Component return the component name. func (inst *TiFlashInstance) Component() string { return "tiflash" } // LogFile return the log file name. func (inst *TiFlashInstance) LogFile() string { return filepath.Join(inst.Dir, "tiflash.log") } // Cmd returns the internal Cmd instance func (inst *TiFlashInstance) Cmd() *exec.Cmd { return inst.Process.Cmd() } // StoreAddr return the store address of TiFlash func (inst *TiFlashInstance) StoreAddr() string { return utils.JoinHostPort(AdvertiseHost(inst.Host), inst.servicePort) } tiup-1.16.3/components/playground/instance/tiflash_config.go000066400000000000000000000072151505422223000242460ustar00rootroot00000000000000// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance import "path/filepath" func (inst *TiFlashInstance) getProxyConfig() map[string]any { config := make(map[string]any) config["rocksdb.max-open-files"] = 256 config["raftdb.max-open-files"] = 256 config["storage.reserve-space"] = 0 config["storage.reserve-raft-space"] = 0 if inst.Role == TiFlashRoleDisaggWrite { if inst.shOpt.Mode == "tidb-cse" { config["storage.api-version"] = 2 config["storage.enable-ttl"] = true config["dfs.prefix"] = "tikv" config["dfs.s3-endpoint"] = inst.shOpt.CSE.S3Endpoint config["dfs.s3-key-id"] = inst.shOpt.CSE.AccessKey config["dfs.s3-secret-key"] = inst.shOpt.CSE.SecretKey config["dfs.s3-bucket"] = inst.shOpt.CSE.Bucket config["dfs.s3-region"] = "local" } } // If TiKVColumnar is enabled, TiFlash Proxy need to know how to access S3 as well. if inst.Role == TiFlashRoleDisaggCompute && inst.shOpt.Mode == "tidb-cse" && inst.shOpt.EnableTiKVColumnar { config["dfs.prefix"] = "tikv" config["dfs.s3-endpoint"] = inst.shOpt.CSE.S3Endpoint config["dfs.s3-key-id"] = inst.shOpt.CSE.AccessKey config["dfs.s3-secret-key"] = inst.shOpt.CSE.SecretKey config["dfs.s3-bucket"] = inst.shOpt.CSE.Bucket config["dfs.s3-region"] = "local" } return config } func (inst *TiFlashInstance) getConfig() map[string]any { config := make(map[string]any) config["flash.proxy.config"] = filepath.Join(inst.Dir, "tiflash_proxy.toml") config["logger.level"] = "debug" if inst.Role == TiFlashRoleDisaggWrite { config["storage.s3.endpoint"] = inst.shOpt.CSE.S3Endpoint config["storage.s3.bucket"] = inst.shOpt.CSE.Bucket config["storage.s3.root"] = "/tiflash-cse/" config["storage.s3.access_key_id"] = inst.shOpt.CSE.AccessKey config["storage.s3.secret_access_key"] = inst.shOpt.CSE.SecretKey config["storage.main.dir"] = []string{filepath.Join(inst.Dir, "main_data")} config["flash.disaggregated_mode"] = "tiflash_write" if inst.shOpt.Mode == "tidb-cse" { config["enable_safe_point_v2"] = true config["storage.api_version"] = 2 } } else if inst.Role == 
TiFlashRoleDisaggCompute { config["storage.s3.endpoint"] = inst.shOpt.CSE.S3Endpoint config["storage.s3.bucket"] = inst.shOpt.CSE.Bucket config["storage.s3.root"] = "/tiflash-cse/" config["storage.s3.access_key_id"] = inst.shOpt.CSE.AccessKey config["storage.s3.secret_access_key"] = inst.shOpt.CSE.SecretKey config["storage.remote.cache.dir"] = filepath.Join(inst.Dir, "remote_cache") config["storage.remote.cache.capacity"] = uint64(50000000000) // 50GB config["storage.main.dir"] = []string{filepath.Join(inst.Dir, "main_data")} config["flash.disaggregated_mode"] = "tiflash_compute" if inst.shOpt.Mode == "tidb-cse" { config["enable_safe_point_v2"] = true if inst.shOpt.EnableTiKVColumnar { config["flash.use_columnar"] = true } } } if inst.shOpt.HighPerf { config["logger.level"] = "info" if inst.Role == TiFlashRoleDisaggWrite { config["profiles.default.cpu_thread_count_scale"] = 5.0 } else if inst.Role == TiFlashRoleDisaggCompute { config["profiles.default.task_scheduler_thread_soft_limit"] = 0 config["profiles.default.task_scheduler_thread_hard_limit"] = 0 } } return config } tiup-1.16.3/components/playground/instance/tiflash_pre7.go000066400000000000000000000122141505422223000236510ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance import ( "bytes" "context" "encoding/json" "fmt" "path" "path/filepath" "time" "github.com/BurntSushi/toml" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/utils" ) func getFlashClusterPath(dir string) string { return fmt.Sprintf("%s/flash_cluster_manager", dir) } type scheduleConfig struct { LowSpaceRatio float64 `json:"low-space-ratio"` } type replicateMaxReplicaConfig struct { MaxReplicas int `json:"max-replicas"` } type replicateEnablePlacementRulesConfig struct { EnablePlacementRules string `json:"enable-placement-rules"` } // startOld is for < 7.1.0. Not maintained any more. Do not introduce new features. func (inst *TiFlashInstance) startOld(ctx context.Context, version utils.Version) error { endpoints := pdEndpoints(inst.pds, false) tidbStatusAddrs := make([]string, 0, len(inst.dbs)) for _, db := range inst.dbs { tidbStatusAddrs = append(tidbStatusAddrs, utils.JoinHostPort(AdvertiseHost(db.Host), db.StatusPort)) } wd, err := filepath.Abs(inst.Dir) if err != nil { return err } // Wait for PD pdClient := api.NewPDClient(ctx, endpoints, 10*time.Second, nil) // set low-space-ratio to 1 to avoid low disk space lowSpaceRatio, err := json.Marshal(scheduleConfig{ LowSpaceRatio: 0.99, }) if err != nil { return err } if err = pdClient.UpdateScheduleConfig(bytes.NewBuffer(lowSpaceRatio)); err != nil { return err } // Update maxReplicas before placement rules so that it would not be overwritten maxReplicas, err := json.Marshal(replicateMaxReplicaConfig{ MaxReplicas: 1, }) if err != nil { return err } if err = pdClient.UpdateReplicateConfig(bytes.NewBuffer(maxReplicas)); err != nil { return err } // Set enable-placement-rules to allow TiFlash work properly enablePlacementRules, err := json.Marshal(replicateEnablePlacementRulesConfig{ EnablePlacementRules: "true", }) if err != nil { return err } if err = 
pdClient.UpdateReplicateConfig(bytes.NewBuffer(enablePlacementRules)); err != nil { return err } dirPath := filepath.Dir(inst.BinPath) clusterManagerPath := getFlashClusterPath(dirPath) if err = inst.checkConfigOld(wd, clusterManagerPath, version, tidbStatusAddrs, endpoints); err != nil { return err } args := []string{ "server", fmt.Sprintf("--config-file=%s", inst.ConfigPath), } envs := []string{ fmt.Sprintf("LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH", dirPath), } inst.Process = &process{cmd: PrepareCommand(ctx, inst.BinPath, args, envs, inst.Dir)} logIfErr(inst.Process.SetOutputFile(inst.LogFile())) return inst.Process.Start() } // checkConfigOld is for < 7.1.0. Not maintained any more. Do not introduce new features. func (inst *TiFlashInstance) checkConfigOld(deployDir, clusterManagerPath string, version utils.Version, tidbStatusAddrs, endpoints []string) (err error) { if err := utils.MkdirAll(inst.Dir, 0755); err != nil { return errors.Trace(err) } var ( flashBuf = new(bytes.Buffer) proxyBuf = new(bytes.Buffer) flashCfgPath = path.Join(inst.Dir, "tiflash.toml") proxyCfgPath = path.Join(inst.Dir, "tiflash-learner.toml") ) defer func() { if err != nil { return } if err = utils.WriteFile(flashCfgPath, flashBuf.Bytes(), 0644); err != nil { return } if err = utils.WriteFile(proxyCfgPath, proxyBuf.Bytes(), 0644); err != nil { return } inst.ConfigPath = flashCfgPath }() // Write default config to buffer if err := writeTiFlashConfigOld(flashBuf, version, inst.tcpPort, inst.Port, inst.servicePort, inst.StatusPort, inst.Host, deployDir, clusterManagerPath, tidbStatusAddrs, endpoints); err != nil { return errors.Trace(err) } if err := writeTiFlashProxyConfigOld(proxyBuf, version, inst.Host, deployDir, inst.servicePort, inst.proxyPort, inst.proxyStatusPort); err != nil { return errors.Trace(err) } if inst.ConfigPath == "" { return } cfg, err := unmarshalConfig(inst.ConfigPath) if err != nil { return errors.Trace(err) } proxyPath := getTiFlashProxyConfigPathOld(cfg) if proxyPath 
!= "" { proxyCfg, err := unmarshalConfig(proxyPath) if err != nil { return errors.Trace(err) } err = overwriteBuf(proxyBuf, proxyCfg) if err != nil { return errors.Trace(err) } } // Always use the tiflash proxy config file in the instance directory setTiFlashProxyConfigPathOld(cfg, proxyCfgPath) return errors.Trace(overwriteBuf(flashBuf, cfg)) } func overwriteBuf(buf *bytes.Buffer, overwrite map[string]any) (err error) { cfg := make(map[string]any) if err = toml.Unmarshal(buf.Bytes(), &cfg); err != nil { return } buf.Reset() return toml.NewEncoder(buf).Encode(spec.MergeConfig(cfg, overwrite)) } tiup-1.16.3/components/playground/instance/tiflash_pre7_config.go000066400000000000000000000064451505422223000252070ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance import ( "fmt" "io" "strings" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) const tiflashDaemonConfigOld = ` [application] runAsDaemon = true ` const tiflashMarkCacheSizeOld = `mark_cache_size = 5368709120` const tiflashConfigOld = ` default_profile = "default" display_name = "TiFlash" http_port = %[2]d listen_host = "0.0.0.0" tcp_port = %[3]d path = "%[5]s" tmp_path = "%[6]s" %[14]s %[13]s [flash] service_addr = "%[10]s:%[8]d" tidb_status_addr = "%[11]s" [flash.flash_cluster] cluster_manager_path = "%[12]s" log = "%[7]s/tiflash_cluster_manager.log" master_ttl = 60 refresh_interval = 20 update_rule_interval = 5 [flash.proxy] config = "%[4]s/tiflash-learner.toml" [logger] count = 20 errorlog = "%[7]s/tiflash_error.log" level = "debug" log = "%[7]s/tiflash.log" size = "1000M" [profiles] [profiles.default] load_balancing = "random" max_memory_usage = 0 use_uncompressed_cache = 0 [profiles.readonly] readonly = 1 [quotas] [quotas.default] [quotas.default.interval] duration = 3600 errors = 0 execution_time = 0 queries = 0 read_rows = 0 result_rows = 0 [raft] pd_addr = "%[1]s" [status] metrics_port = %[9]d [users] [users.default] password = "" profile = "default" quota = "default" [users.default.networks] ip = "::/0" [users.readonly] password = "" profile = "readonly" quota = "default" [users.readonly.networks] ip = "::/0" ` // writeTiFlashConfigOld is for < 7.1.0. Not maintained any more. Do not introduce new features. 
func writeTiFlashConfigOld(w io.Writer, version utils.Version, tcpPort, httpPort, servicePort, metricsPort int, host, deployDir, clusterManagerPath string, tidbStatusAddrs, endpoints []string) error { pdAddrs := strings.Join(endpoints, ",") dataDir := fmt.Sprintf("%s/data", deployDir) tmpDir := fmt.Sprintf("%s/tmp", deployDir) logDir := fmt.Sprintf("%s/log", deployDir) ip := AdvertiseHost(host) var conf string if tidbver.TiFlashNotNeedSomeConfig(version.String()) { conf = fmt.Sprintf(tiflashConfigOld, pdAddrs, httpPort, tcpPort, deployDir, dataDir, tmpDir, logDir, servicePort, metricsPort, ip, strings.Join(tidbStatusAddrs, ","), clusterManagerPath, "", "") } else { conf = fmt.Sprintf(tiflashConfigOld, pdAddrs, httpPort, tcpPort, deployDir, dataDir, tmpDir, logDir, servicePort, metricsPort, ip, strings.Join(tidbStatusAddrs, ","), clusterManagerPath, tiflashDaemonConfigOld, tiflashMarkCacheSizeOld) } _, err := w.Write([]byte(conf)) return err } func getTiFlashProxyConfigPathOld(cfg map[string]any) string { defer func() { if r := recover(); r != nil { return } }() return cfg["flash"].(map[string]any)["proxy"].(map[string]any)["config"].(string) } func setTiFlashProxyConfigPathOld(cfg map[string]any, path string) { cfg["flash"].(map[string]any)["proxy"].(map[string]any)["config"] = path } tiup-1.16.3/components/playground/instance/tiflash_pre7_proxy_config.go000066400000000000000000000034151505422223000264420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance import ( "fmt" "io" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) const tiflashProxyConfigOld = ` log-file = "%[1]s/tiflash_tikv.log" [rocksdb] wal-dir = "" max-open-files = 256 [security] ca-path = "" cert-path = "" key-path = "" [server] addr = "0.0.0.0:%[4]d" advertise-addr = "%[2]s:%[4]d" engine-addr = "%[2]s:%[3]d" %[5]s [storage] data-dir = "%[6]s" [raftdb] max-open-files = 256 ` // writeTiFlashProxyConfigOld is for < 7.1.0. Not maintained any more. Do not introduce new features. func writeTiFlashProxyConfigOld(w io.Writer, version utils.Version, host, deployDir string, servicePort, proxyPort, proxyStatusPort int) error { // TODO: support multi-dir dataDir := fmt.Sprintf("%s/flash", deployDir) logDir := fmt.Sprintf("%s/log", deployDir) ip := AdvertiseHost(host) var statusAddr string if tidbver.TiFlashSupportAdvertiseStatusAddr(version.String()) { statusAddr = fmt.Sprintf(`status-addr = "0.0.0.0:%[2]d" advertise-status-addr = "%[1]s:%[2]d"`, ip, proxyStatusPort) } else { statusAddr = fmt.Sprintf(`status-addr = "%[1]s:%[2]d"`, ip, proxyStatusPort) } conf := fmt.Sprintf(tiflashProxyConfigOld, logDir, ip, servicePort, proxyPort, statusAddr, dataDir) _, err := w.Write([]byte(conf)) return err } tiup-1.16.3/components/playground/instance/tikv.go000066400000000000000000000065251505422223000222470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance import ( "context" "fmt" "path/filepath" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/utils" ) // TiKVInstance represent a running tikv-server type TiKVInstance struct { instance shOpt SharedOptions pds []*PDInstance tsos []*PDInstance Process } // NewTiKVInstance return a TiKVInstance func NewTiKVInstance(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, port int, pds []*PDInstance, tsos []*PDInstance) *TiKVInstance { if port <= 0 { port = 20160 } return &TiKVInstance{ shOpt: shOpt, instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, port, shOpt.PortOffset), StatusPort: utils.MustGetFreePort(host, 20180, shOpt.PortOffset), ConfigPath: configPath, }, pds: pds, tsos: tsos, } } // Addr return the address of tikv. func (inst *TiKVInstance) Addr() string { return utils.JoinHostPort(inst.Host, inst.Port) } // Start calls set inst.cmd and Start func (inst *TiKVInstance) Start(ctx context.Context) error { configPath := filepath.Join(inst.Dir, "tikv.toml") if err := prepareConfig( configPath, inst.ConfigPath, inst.getConfig(), ); err != nil { return err } // Need to check tso status if inst.shOpt.PDMode == "ms" { var tsoEnds []string for _, pd := range inst.tsos { tsoEnds = append(tsoEnds, fmt.Sprintf("%s:%d", AdvertiseHost(pd.Host), pd.StatusPort)) } pdcli := api.NewPDClient(ctx, tsoEnds, 10*time.Second, nil, ) if err := pdcli.CheckTSOHealth(&utils.RetryOption{ Delay: time.Second * 5, Timeout: time.Second * 300, }); err != nil { return err } } endpoints := pdEndpoints(inst.pds, true) args := []string{ fmt.Sprintf("--addr=%s", utils.JoinHostPort(inst.Host, inst.Port)), fmt.Sprintf("--advertise-addr=%s", utils.JoinHostPort(AdvertiseHost(inst.Host), inst.Port)), fmt.Sprintf("--status-addr=%s", utils.JoinHostPort(inst.Host, inst.StatusPort)), fmt.Sprintf("--pd-endpoints=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--config=%s", 
configPath), fmt.Sprintf("--data-dir=%s", filepath.Join(inst.Dir, "data")), fmt.Sprintf("--log-file=%s", inst.LogFile()), } envs := []string{"MALLOC_CONF=prof:true,prof_active:false"} inst.Process = &process{cmd: PrepareCommand(ctx, inst.BinPath, args, envs, inst.Dir)} logIfErr(inst.Process.SetOutputFile(inst.LogFile())) return inst.Process.Start() } // Component return the component name. func (inst *TiKVInstance) Component() string { return "tikv" } // LogFile return the log file name. func (inst *TiKVInstance) LogFile() string { return filepath.Join(inst.Dir, "tikv.log") } // StoreAddr return the store address of TiKV func (inst *TiKVInstance) StoreAddr() string { return utils.JoinHostPort(AdvertiseHost(inst.Host), inst.Port) } tiup-1.16.3/components/playground/instance/tikv_cdc.go000066400000000000000000000041661505422223000230570ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "path/filepath" "strings" "github.com/pingcap/tiup/pkg/utils" ) // TiKVCDC represent a TiKV-CDC instance. type TiKVCDC struct { instance pds []*PDInstance Process } var _ Instance = &TiKVCDC{} // NewTiKVCDC create a TiKVCDC instance. 
func NewTiKVCDC(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, pds []*PDInstance) *TiKVCDC { tikvCdc := &TiKVCDC{ instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, 8600, shOpt.PortOffset), ConfigPath: configPath, }, pds: pds, } tikvCdc.StatusPort = tikvCdc.Port return tikvCdc } // Start implements Instance interface. func (c *TiKVCDC) Start(ctx context.Context) error { endpoints := pdEndpoints(c.pds, true) args := []string{ "server", fmt.Sprintf("--addr=%s", utils.JoinHostPort(c.Host, c.Port)), fmt.Sprintf("--advertise-addr=%s", utils.JoinHostPort(AdvertiseHost(c.Host), c.Port)), fmt.Sprintf("--pd=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--log-file=%s", c.LogFile()), fmt.Sprintf("--data-dir=%s", filepath.Join(c.Dir, "data")), } if c.ConfigPath != "" { args = append(args, fmt.Sprintf("--config=%s", c.ConfigPath)) } c.Process = &process{cmd: PrepareCommand(ctx, c.BinPath, args, nil, c.Dir)} logIfErr(c.Process.SetOutputFile(c.LogFile())) return c.Process.Start() } // Component return component name. func (c *TiKVCDC) Component() string { return "tikv-cdc" } // LogFile return the log file. func (c *TiKVCDC) LogFile() string { return filepath.Join(c.Dir, "tikv_cdc.log") } tiup-1.16.3/components/playground/instance/tikv_config.go000066400000000000000000000023251505422223000235660ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package instance func (inst *TiKVInstance) getConfig() map[string]any { config := make(map[string]any) config["rocksdb.max-open-files"] = 256 config["raftdb.max-open-files"] = 256 config["storage.reserve-space"] = 0 config["storage.reserve-raft-space"] = 0 if inst.shOpt.Mode == "tidb-cse" { config["storage.api-version"] = 2 config["storage.enable-ttl"] = true config["dfs.prefix"] = "tikv" config["dfs.s3-endpoint"] = inst.shOpt.CSE.S3Endpoint config["dfs.s3-key-id"] = inst.shOpt.CSE.AccessKey config["dfs.s3-secret-key"] = inst.shOpt.CSE.SecretKey config["dfs.s3-bucket"] = inst.shOpt.CSE.Bucket config["dfs.s3-region"] = "local" config["kvengine.build-columnar"] = true } return config } tiup-1.16.3/components/playground/instance/tikv_worker.go000066400000000000000000000071061505422223000236340ustar00rootroot00000000000000// Copyright 2025 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "fmt" "path/filepath" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/utils" ) // resolveTiKVWorkerBinPath resolves the tikv-worker binary path when tikv-server path is provided. func resolveTiKVWorkerBinPath(binPath string) string { if !strings.HasSuffix(binPath, "tikv-server") { return binPath } dir := filepath.Dir(binPath) return filepath.Join(dir, "tikv-worker") } // TiKVWorkerInstance represent a running TiKVWorker instance. type TiKVWorkerInstance struct { instance shOpt SharedOptions pds []*PDInstance Process } // NewTiKVWorkerInstance creates a new TiKVWorker instance. 
func NewTiKVWorkerInstance(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, port int, pds []*PDInstance) *TiKVWorkerInstance { if port <= 0 { port = 19000 } return &TiKVWorkerInstance{ shOpt: shOpt, instance: instance{ BinPath: resolveTiKVWorkerBinPath(binPath), ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, port, shOpt.PortOffset), ConfigPath: configPath, }, pds: pds, } } // Addr return the address of TiKVWorker. func (inst *TiKVWorkerInstance) Addr() string { return utils.JoinHostPort(inst.Host, inst.Port) } // Start calls set inst.cmd and Start func (inst *TiKVWorkerInstance) Start(ctx context.Context) error { if inst.shOpt.PDMode == "ms" { return errors.New("tikv_worker does not support ms pd mode") } if inst.shOpt.Mode != "tidb-cse" { return errors.New("tikv_worker only supports tidb-cse mode") } configPath := filepath.Join(inst.Dir, "tikv_worker.toml") if err := prepareConfig( configPath, inst.ConfigPath, inst.getConfig(), ); err != nil { return err } endpoints := pdEndpoints(inst.pds, true) args := []string{ fmt.Sprintf("--addr=%s", utils.JoinHostPort(inst.Host, inst.Port)), fmt.Sprintf("--log-file=%s", inst.LogFile()), fmt.Sprintf("--pd-endpoints=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--config=%s", configPath), } inst.Process = &process{cmd: PrepareCommand(ctx, inst.BinPath, args, nil, inst.Dir)} logIfErr(inst.Process.SetOutputFile(inst.LogFile())) return inst.Process.Start() } // Component return the component name. func (inst *TiKVWorkerInstance) Component() string { return "tikv_worker" } // LogFile return the log file name. 
func (inst *TiKVWorkerInstance) LogFile() string { return filepath.Join(inst.Dir, "tikv_worker.log") } func (inst *TiKVWorkerInstance) getConfig() map[string]any { config := make(map[string]any) config["dfs.prefix"] = "tikv" config["dfs.s3-endpoint"] = inst.shOpt.CSE.S3Endpoint config["dfs.s3-key-id"] = inst.shOpt.CSE.AccessKey config["dfs.s3-secret-key"] = inst.shOpt.CSE.SecretKey config["dfs.s3-bucket"] = inst.shOpt.CSE.Bucket config["dfs.s3-region"] = "local" config["raft-engine.enabled"] = false config["schema-manager.dir"] = filepath.Join(inst.Dir, "schemas") config["schema-manager.schema-refresh-threshold"] = 1 config["schema-manager.enabled"] = true config["schema-manager.keyspace-refresh-interval"] = "10s" return config } tiup-1.16.3/components/playground/instance/tiproxy.go000066400000000000000000000077661505422223000230200ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package instance import ( "context" "encoding/pem" "fmt" "os" "path/filepath" "strings" "github.com/BurntSushi/toml" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/crypto" "github.com/pingcap/tiup/pkg/utils" ) // TiProxy represent a ticdc instance. type TiProxy struct { instance pds []*PDInstance Process } var _ Instance = &TiProxy{} // GenTiProxySessionCerts will create a self-signed certs for TiProxy session migration. NOTE that this cert is directly used by TiDB. 
func GenTiProxySessionCerts(dir string) error { if _, err := os.Stat(filepath.Join(dir, "tiproxy.crt")); err == nil { return nil } ca, err := crypto.NewCA("tiproxy") if err != nil { return err } privKey, err := crypto.NewKeyPair(crypto.KeyTypeRSA, crypto.KeySchemeRSASSAPSSSHA256) if err != nil { return err } csr, err := privKey.CSR("tiproxy", "tiproxy", nil, nil) if err != nil { return err } cert, err := ca.Sign(csr) if err != nil { return err } if err := utils.SaveFileWithBackup(filepath.Join(dir, "tiproxy.key"), privKey.Pem(), ""); err != nil { return err } return utils.SaveFileWithBackup(filepath.Join(dir, "tiproxy.crt"), pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: cert, }), "") } // NewTiProxy create a TiProxy instance. func NewTiProxy(shOpt SharedOptions, binPath string, dir, host, configPath string, id int, port int, pds []*PDInstance) *TiProxy { if port <= 0 { port = 6000 } tiproxy := &TiProxy{ instance: instance{ BinPath: binPath, ID: id, Dir: dir, Host: host, Port: utils.MustGetFreePort(host, port, shOpt.PortOffset), StatusPort: utils.MustGetFreePort(host, 3080, shOpt.PortOffset), ConfigPath: configPath, }, pds: pds, } return tiproxy } // MetricAddr implements Instance interface. func (c *TiProxy) MetricAddr() (r MetricAddr) { r.Targets = append(r.Targets, utils.JoinHostPort(c.Host, c.StatusPort)) r.Labels = map[string]string{ "__metrics_path__": "/api/metrics", } return } // Start implements Instance interface. 
func (c *TiProxy) Start(ctx context.Context) error { endpoints := pdEndpoints(c.pds, false) configPath := filepath.Join(c.Dir, "config", "proxy.toml") dir := filepath.Dir(configPath) if err := utils.MkdirAll(dir, 0755); err != nil { return err } userConfig, err := unmarshalConfig(c.ConfigPath) if err != nil { return err } if userConfig == nil { userConfig = make(map[string]any) } cf, err := os.Create(configPath) if err != nil { return err } enc := toml.NewEncoder(cf) enc.Indent = "" if err := enc.Encode(spec.MergeConfig(userConfig, map[string]any{ "proxy.pd-addrs": strings.Join(endpoints, ","), "proxy.addr": utils.JoinHostPort(c.Host, c.Port), "proxy.advertise-addr": AdvertiseHost(c.Host), "api.addr": utils.JoinHostPort(c.Host, c.StatusPort), "log.log-file.filename": c.LogFile(), })); err != nil { return err } args := []string{ fmt.Sprintf("--config=%s", configPath), } c.Process = &process{cmd: PrepareCommand(ctx, c.BinPath, args, nil, c.Dir)} logIfErr(c.Process.SetOutputFile(c.LogFile())) return c.Process.Start() } // Addr return addresses that can be connected by MySQL clients. func (c *TiProxy) Addr() string { return utils.JoinHostPort(AdvertiseHost(c.Host), c.Port) } // Component return component name. func (c *TiProxy) Component() string { return "tiproxy" } // LogFile return the log file. func (c *TiProxy) LogFile() string { return filepath.Join(c.Dir, "tiproxy.log") } tiup-1.16.3/components/playground/main.go000066400000000000000000000635011505422223000204070ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "net" "net/http" _ "net/http/pprof" "os" "os/signal" "os/user" "path/filepath" "strconv" "strings" "sync/atomic" "syscall" "time" "github.com/fatih/color" _ "github.com/go-sql-driver/mysql" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/playground/instance" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/tui/colorstr" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/version" "github.com/spf13/cobra" "github.com/spf13/pflag" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" "golang.org/x/mod/semver" ) // BootOptions is the topology and options used to start a playground cluster type BootOptions struct { ShOpt instance.SharedOptions `yaml:"shared_opt"` Version string `yaml:"version"` PD instance.Config `yaml:"pd"` // will change to api when pd_mode == ms TSO instance.Config `yaml:"tso"` // Only available when pd_mode == ms Scheduling instance.Config `yaml:"scheduling"` // Only available when pd_mode == ms TiProxy instance.Config `yaml:"tiproxy"` TiDB instance.Config `yaml:"tidb"` TiKV instance.Config `yaml:"tikv"` TiFlash instance.Config `yaml:"tiflash"` // ignored when ShOpt.Mode == tidb-cse or tiflash-disagg TiFlashWrite instance.Config `yaml:"tiflash_write"` // Only available when ShOpt.Mode == tidb-cse or tiflash-disagg TiFlashCompute instance.Config `yaml:"tiflash_compute"` // Only available when ShOpt.Mode == 
tidb-cse or tiflash-disagg TiCDC instance.Config `yaml:"ticdc"` TiKVCDC instance.Config `yaml:"tikv_cdc"` TiKVWorker instance.Config `yaml:"tikv_worker"` // Only available when ShOpt.Mode == tidb-cse Pump instance.Config `yaml:"pump"` Drainer instance.Config `yaml:"drainer"` Host string `yaml:"host"` Monitor bool `yaml:"monitor"` GrafanaPort int `yaml:"grafana_port"` DMMaster instance.Config `yaml:"dm_master"` DMWorker instance.Config `yaml:"dm_worker"` } var ( options = &BootOptions{} tag string deleteWhenExit bool tiupDataDir string dataDir string log = logprinter.NewLogger("") ) func installIfMissing(component, version string) error { env := environment.GlobalEnv() installed, err := env.V1Repository().LocalComponentInstalled(component, version) if err != nil { return err } if installed { return nil } spec := repository.ComponentSpec{ ID: component, Version: version, } return env.V1Repository().UpdateComponents([]repository.ComponentSpec{spec}) } func execute() error { rootCmd := &cobra.Command{ Use: "tiup playground [version]", Long: `Bootstrap a TiDB cluster in your local host, the latest release version will be chosen if you don't specified a version. 
Examples: $ tiup playground nightly # Start a TiDB nightly version local cluster $ tiup playground v5.0.1 --db 3 --pd 3 --kv 3 # Start a local cluster with 10 nodes $ tiup playground nightly --without-monitor # Start a local cluster and disable monitor system $ tiup playground --pd.config ~/config/pd.toml # Start a local cluster with specified configuration file $ tiup playground --db.binpath /xx/tidb-server # Start a local cluster with component binary path $ tiup playground --tag xx # Start a local cluster with data dir named 'xx' and uncleaned after exit $ tiup playground --mode tikv-slim # Start a local tikv only cluster (No TiDB or TiFlash Available) $ tiup playground --mode tikv-slim --kv 3 --pd 3 # Start a local tikv only cluster with 6 nodes`, SilenceUsage: true, SilenceErrors: true, Version: version.NewTiUPVersion().String(), Args: func(cmd *cobra.Command, args []string) error { return nil }, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { tiupDataDir = os.Getenv(localdata.EnvNameInstanceDataDir) tiupHome := os.Getenv(localdata.EnvNameHome) if tiupHome == "" { tiupHome, _ = getAbsolutePath(filepath.Join("~", localdata.ProfileDirName)) } switch { case tag != "": dataDir = filepath.Join(tiupHome, localdata.DataParentDir, tag) case tiupDataDir != "": dataDir = tiupDataDir tag = dataDir[strings.LastIndex(dataDir, "/")+1:] default: tag = utils.Base62Tag() dataDir = filepath.Join(tiupHome, localdata.DataParentDir, tag) deleteWhenExit = true } err := utils.MkdirAll(dataDir, os.ModePerm) if err != nil { return err } fmt.Printf("\033]0;TiUP Playground: %s\a", tag) return nil }, RunE: func(cmd *cobra.Command, args []string) error { if len(args) > 0 { options.Version = args[0] } if err := populateDefaultOpt(cmd.Flags()); err != nil { return err } port := utils.MustGetFreePort("0.0.0.0", 9527, options.ShOpt.PortOffset) err := dumpPort(filepath.Join(dataDir, "port"), port) p := NewPlayground(dataDir, port) if err != nil { return err } env, err := 
environment.InitEnv(repository.Options{}, repository.MirrorOptions{}) if err != nil { return err } environment.SetGlobalEnv(env) var booted uint32 ctx, cancel := context.WithCancel(context.Background()) ctx = context.WithValue(ctx, logprinter.ContextKeyLogger, log) defer cancel() go func() { sc := make(chan os.Signal, 1) signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, ) sig := (<-sc).(syscall.Signal) atomic.StoreInt32(&p.curSig, int32(sig)) colorstr.Printf("\n[red][bold]Playground receive signal: %s[reset]\n", sig) // if bootCluster is not done we just cancel context to make it // clean up and return ASAP and exit directly after timeout. // Note now bootCluster can not learn the context is done and return quickly now // like while it's downloading component. if atomic.LoadUint32(&booted) == 0 { cancel() time.AfterFunc(time.Second, func() { removeData() os.Exit(0) }) return } go p.terminate(sig) // If user try double ctrl+c, force quit sig = (<-sc).(syscall.Signal) atomic.StoreInt32(&p.curSig, int32(syscall.SIGKILL)) if sig == syscall.SIGINT { p.terminate(syscall.SIGKILL) } }() // expand version string if !semver.IsValid(options.Version) { version, err := env.V1Repository().ResolveComponentVersion(spec.ComponentTiDB, options.Version) if err != nil { return errors.Annotate(err, fmt.Sprintf("Cannot resolve version %s to a valid semver string", options.Version)) } // for nightly, may not use the same version for cluster if options.Version == "nightly" { version = "nightly" } if options.Version != version.String() { colorstr.Fprintf(os.Stderr, ` Note: Version constraint [bold]%s[reset] is resolved to [green][bold]%s[reset]. 
If you'd like to use other versions: Use exact version: [tiup_command]tiup playground v7.1.0[reset] Use version range: [tiup_command]tiup playground ^5[reset] Use nightly: [tiup_command]tiup playground nightly[reset] `, options.Version, version.String()) } options.Version = version.String() } bootErr := p.bootCluster(ctx, env, options) if bootErr != nil { // always kill all process started and wait before quit. atomic.StoreInt32(&p.curSig, int32(syscall.SIGKILL)) p.terminate(syscall.SIGKILL) _ = p.wait() return errors.Annotate(bootErr, "Playground bootstrapping failed") } atomic.StoreUint32(&booted, 1) waitErr := p.wait() if waitErr != nil { return waitErr } return nil }, } rootCmd.Flags().StringVar(&options.ShOpt.Mode, "mode", "tidb", "TiUP playground mode: 'tidb', 'tidb-cse', 'tiflash-disagg', 'tikv-slim'") rootCmd.Flags().StringVar(&options.ShOpt.PDMode, "pd.mode", "pd", "PD mode: 'pd', 'ms'") rootCmd.Flags().StringVar(&options.ShOpt.CSE.S3Endpoint, "cse.s3_endpoint", "http://127.0.0.1:9000", "Object store URL for --mode=tidb-cse or --mode=tiflash-disagg") rootCmd.Flags().StringVar(&options.ShOpt.CSE.Bucket, "cse.bucket", "tiflash", "Object store bucket for --mode=tidb-cse or --mode=tiflash-disagg") rootCmd.Flags().StringVar(&options.ShOpt.CSE.AccessKey, "cse.access_key", "minioadmin", "Object store access key for --mode=tidb-cse or --mode=tiflash-disagg") rootCmd.Flags().StringVar(&options.ShOpt.CSE.SecretKey, "cse.secret_key", "minioadmin", "Object store secret key for --mode=tidb-cse or --mode=tiflash-disagg") rootCmd.Flags().BoolVar(&options.ShOpt.HighPerf, "perf", false, "Tune default config for better performance instead of debug troubleshooting") rootCmd.Flags().BoolVar(&options.ShOpt.EnableTiKVColumnar, "tikv.columnar", false, "Enable TiKV columnar storage engine, only available when --mode=tidb-cse") rootCmd.PersistentFlags().StringVarP(&tag, "tag", "T", "", "Specify a tag for playground, data dir of this tag will not be removed after exit") 
rootCmd.Flags().Bool("without-monitor", false, "Don't start prometheus and grafana component") rootCmd.Flags().BoolVar(&options.Monitor, "monitor", true, "Start prometheus and grafana component") _ = rootCmd.Flags().MarkDeprecated("monitor", "Please use --without-monitor to control whether to disable monitor.") rootCmd.Flags().IntVar(&options.GrafanaPort, "grafana.port", 3000, "grafana port. If not provided, grafana will use 3000 as its port.") rootCmd.Flags().IntVar(&options.ShOpt.PortOffset, "port-offset", 0, "If specified, all components will use default_port+port_offset as the port. This argument is useful when you want to start multiple playgrounds on the same host. Recommend to set to 10000, 20000, etc.") // NOTE: Do not set default values if they may be changed in different modes. rootCmd.Flags().IntVar(&options.TiDB.Num, "db", 0, "TiDB instance number") rootCmd.Flags().IntVar(&options.TiKV.Num, "kv", 0, "TiKV instance number") rootCmd.Flags().IntVar(&options.PD.Num, "pd", 0, "PD instance number") rootCmd.Flags().IntVar(&options.TSO.Num, "tso", 0, "TSO instance number") rootCmd.Flags().IntVar(&options.Scheduling.Num, "scheduling", 0, "Scheduling instance number") rootCmd.Flags().IntVar(&options.TiProxy.Num, "tiproxy", 0, "TiProxy instance number") rootCmd.Flags().IntVar(&options.TiFlash.Num, "tiflash", 0, "TiFlash instance number, when --mode=tidb-cse or --mode=tiflash-disagg this will set instance number for both Write Node and Compute Node") rootCmd.Flags().IntVar(&options.TiFlashWrite.Num, "tiflash.write", 0, "TiFlash Write instance number, available when --mode=tidb-cse or --mode=tiflash-disagg, take precedence over --tiflash") rootCmd.Flags().IntVar(&options.TiFlashCompute.Num, "tiflash.compute", 0, "TiFlash Compute instance number, available when --mode=tidb-cse or --mode=tiflash-disagg, take precedence over --tiflash") rootCmd.Flags().IntVar(&options.TiCDC.Num, "ticdc", 0, "TiCDC instance number") rootCmd.Flags().IntVar(&options.TiKVCDC.Num, "kvcdc", 
0, "TiKV-CDC instance number") rootCmd.Flags().IntVar(&options.Pump.Num, "pump", 0, "Pump instance number") rootCmd.Flags().IntVar(&options.Drainer.Num, "drainer", 0, "Drainer instance number") rootCmd.Flags().IntVar(&options.DMMaster.Num, "dm-master", 0, "DM-master instance number") rootCmd.Flags().IntVar(&options.DMWorker.Num, "dm-worker", 0, "DM-worker instance number") rootCmd.Flags().IntVar(&options.TiKVWorker.Num, "tikv.worker", 0, "TiKV worker instance number, only available when --mode=tidb-cse. Could be 0 or 1.") rootCmd.Flags().IntVar(&options.TiDB.UpTimeout, "db.timeout", 60, "TiDB max wait time in seconds for starting, 0 means no limit") rootCmd.Flags().IntVar(&options.TiFlash.UpTimeout, "tiflash.timeout", 120, "TiFlash max wait time in seconds for starting, 0 means no limit") rootCmd.Flags().IntVar(&options.TiProxy.UpTimeout, "tiproxy.timeout", 60, "TiProxy max wait time in seconds for starting, 0 means no limit") rootCmd.Flags().StringVar(&options.Host, "host", "127.0.0.1", "Playground cluster host") rootCmd.Flags().StringVar(&options.TiDB.Host, "db.host", "", "Playground TiDB host. If not provided, TiDB will still use `host` flag as its host") rootCmd.Flags().IntVar(&options.TiDB.Port, "db.port", 0, "Playground TiDB port. If not provided, TiDB will use 4000 as its port. Or 6000 if TiProxy is enabled.") rootCmd.Flags().StringVar(&options.PD.Host, "pd.host", "", "Playground PD host. If not provided, PD will still use `host` flag as its host") rootCmd.Flags().IntVar(&options.PD.Port, "pd.port", 0, "Playground PD port. If not provided, PD will use 2379 as its port") rootCmd.Flags().StringVar(&options.TiKV.Host, "kv.host", "", "Playground TiKV host. If not provided, TiKV will still use `host` flag as its host") rootCmd.Flags().IntVar(&options.TiKV.Port, "kv.port", 0, "Playground TiKV port. If not provided, TiKV will use 20160 as its port") rootCmd.Flags().StringVar(&options.TiCDC.Host, "ticdc.host", "", "Playground TiCDC host. 
If not provided, TiDB will still use `host` flag as its host") rootCmd.Flags().IntVar(&options.TiCDC.Port, "ticdc.port", 0, "Playground TiCDC port. If not provided, TiCDC will use 8300 as its port") rootCmd.Flags().StringVar(&options.TiProxy.Host, "tiproxy.host", "", "Playground TiProxy host. If not provided, TiProxy will still use `host` flag as its host") rootCmd.Flags().IntVar(&options.TiProxy.Port, "tiproxy.port", 0, "Playground TiProxy port. If not provided, TiProxy will use 6000 as its port") rootCmd.Flags().StringVar(&options.DMMaster.Host, "dm-master.host", "", "DM-master instance host") rootCmd.Flags().IntVar(&options.DMMaster.Port, "dm-master.port", 8261, "DM-master instance port") rootCmd.Flags().StringVar(&options.DMWorker.Host, "dm-worker.host", "", "DM-worker instance host") rootCmd.Flags().IntVar(&options.DMWorker.Port, "dm-worker.port", 8262, "DM-worker instance port") rootCmd.Flags().StringVar(&options.TiKVWorker.Host, "tikv.worker.host", "", "TiKV worker instance host") rootCmd.Flags().IntVar(&options.TiKVWorker.Port, "tikv.worker.port", 19000, "TiKV worker instance port") rootCmd.Flags().StringVar(&options.TiDB.ConfigPath, "db.config", "", "TiDB instance configuration file") rootCmd.Flags().StringVar(&options.TiKV.ConfigPath, "kv.config", "", "TiKV instance configuration file") rootCmd.Flags().StringVar(&options.PD.ConfigPath, "pd.config", "", "PD instance configuration file") rootCmd.Flags().StringVar(&options.TSO.ConfigPath, "tso.config", "", "TSO instance configuration file") rootCmd.Flags().StringVar(&options.Scheduling.ConfigPath, "scheduling.config", "", "Scheduling instance configuration file") rootCmd.Flags().StringVar(&options.TiProxy.ConfigPath, "tiproxy.config", "", "TiProxy instance configuration file") rootCmd.Flags().StringVar(&options.TiFlash.ConfigPath, "tiflash.config", "", "TiFlash instance configuration file, when --mode=tidb-cse or --mode=tiflash-disagg this will set config file for both Write Node and Compute Node") 
rootCmd.Flags().StringVar(&options.TiFlashWrite.ConfigPath, "tiflash.write.config", "", "TiFlash Write instance configuration file, available when --mode=tidb-cse or --mode=tiflash-disagg, take precedence over --tiflash.config") rootCmd.Flags().StringVar(&options.TiFlashCompute.ConfigPath, "tiflash.compute.config", "", "TiFlash Compute instance configuration file, available when --mode=tidb-cse or --mode=tiflash-disagg, take precedence over --tiflash.config") rootCmd.Flags().StringVar(&options.Pump.ConfigPath, "pump.config", "", "Pump instance configuration file") rootCmd.Flags().StringVar(&options.Drainer.ConfigPath, "drainer.config", "", "Drainer instance configuration file") rootCmd.Flags().StringVar(&options.TiCDC.ConfigPath, "ticdc.config", "", "TiCDC instance configuration file") rootCmd.Flags().StringVar(&options.TiKVCDC.ConfigPath, "kvcdc.config", "", "TiKV-CDC instance configuration file") rootCmd.Flags().StringVar(&options.DMMaster.ConfigPath, "dm-master.config", "", "DM-master instance configuration file") rootCmd.Flags().StringVar(&options.DMWorker.ConfigPath, "dm-worker.config", "", "DM-worker instance configuration file") rootCmd.Flags().StringVar(&options.TiKVWorker.ConfigPath, "tikv.worker.config", "", "TiKV worker instance configuration file") rootCmd.Flags().StringVar(&options.TiDB.BinPath, "db.binpath", "", "TiDB instance binary path") rootCmd.Flags().StringVar(&options.TiKV.BinPath, "kv.binpath", "", "TiKV instance binary path") rootCmd.Flags().StringVar(&options.PD.BinPath, "pd.binpath", "", "PD instance binary path") rootCmd.Flags().StringVar(&options.TSO.BinPath, "tso.binpath", "", "TSO instance binary path") rootCmd.Flags().StringVar(&options.Scheduling.BinPath, "scheduling.binpath", "", "Scheduling instance binary path") rootCmd.Flags().StringVar(&options.TiProxy.BinPath, "tiproxy.binpath", "", "TiProxy instance binary path") rootCmd.Flags().StringVar(&options.TiProxy.Version, "tiproxy.version", "", "TiProxy instance version") 
rootCmd.Flags().StringVar(&options.TiFlash.BinPath, "tiflash.binpath", "", "TiFlash instance binary path, when --mode=tidb-cse or --mode=tiflash-disagg this will set binary path for both Write Node and Compute Node") rootCmd.Flags().StringVar(&options.TiFlashWrite.BinPath, "tiflash.write.binpath", "", "TiFlash Write instance binary path, available when --mode=tidb-cse or --mode=tiflash-disagg, take precedence over --tiflash.binpath") rootCmd.Flags().StringVar(&options.TiFlashCompute.BinPath, "tiflash.compute.binpath", "", "TiFlash Compute instance binary path, available when --mode=tidb-cse or --mode=tiflash-disagg, take precedence over --tiflash.binpath") rootCmd.Flags().StringVar(&options.TiCDC.BinPath, "ticdc.binpath", "", "TiCDC instance binary path") rootCmd.Flags().StringVar(&options.TiKVCDC.BinPath, "kvcdc.binpath", "", "TiKV-CDC instance binary path") rootCmd.Flags().StringVar(&options.Pump.BinPath, "pump.binpath", "", "Pump instance binary path") rootCmd.Flags().StringVar(&options.Drainer.BinPath, "drainer.binpath", "", "Drainer instance binary path") rootCmd.Flags().StringVar(&options.DMMaster.BinPath, "dm-master.binpath", "", "DM-master instance binary path") rootCmd.Flags().StringVar(&options.DMWorker.BinPath, "dm-worker.binpath", "", "DM-worker instance binary path") rootCmd.Flags().StringVar(&options.TiKVWorker.BinPath, "tikv.worker.binpath", "", "TiKV worker instance binary path. 
If a path of `tikv-server` is specified, `tikv-worker` in the same directory will be used") rootCmd.Flags().StringVar(&options.TiKVCDC.Version, "kvcdc.version", "", "TiKV-CDC instance version") rootCmd.AddCommand(newDisplay()) rootCmd.AddCommand(newScaleOut()) rootCmd.AddCommand(newScaleIn()) return rootCmd.Execute() } func populateDefaultOpt(flagSet *pflag.FlagSet) error { if flagSet.Lookup("without-monitor").Changed { v, _ := flagSet.GetBool("without-monitor") options.Monitor = !v } defaultInt := func(variable *int, flagName string, defaultValue int) { if !flagSet.Lookup(flagName).Changed { *variable = defaultValue } } defaultStr := func(variable *string, flagName string, defaultValue string) { if !flagSet.Lookup(flagName).Changed { *variable = defaultValue } } switch options.ShOpt.Mode { case "tidb": defaultInt(&options.TiDB.Num, "db", 1) defaultInt(&options.TiKV.Num, "kv", 1) defaultInt(&options.TiFlash.Num, "tiflash", 1) case "tikv-slim": defaultInt(&options.TiKV.Num, "kv", 1) case "tidb-cse", "tiflash-disagg": defaultInt(&options.TiDB.Num, "db", 1) defaultInt(&options.TiKV.Num, "kv", 1) defaultInt(&options.TiFlash.Num, "tiflash", 1) defaultInt(&options.TiFlashWrite.Num, "tiflash.write", options.TiFlash.Num) defaultStr(&options.TiFlashWrite.BinPath, "tiflash.write.binpath", options.TiFlash.BinPath) defaultStr(&options.TiFlashWrite.ConfigPath, "tiflash.write.config", options.TiFlash.ConfigPath) options.TiFlashWrite.UpTimeout = options.TiFlash.UpTimeout defaultInt(&options.TiFlashCompute.Num, "tiflash.compute", options.TiFlash.Num) defaultStr(&options.TiFlashCompute.BinPath, "tiflash.compute.binpath", options.TiFlash.BinPath) defaultStr(&options.TiFlashCompute.ConfigPath, "tiflash.compute.config", options.TiFlash.ConfigPath) options.TiFlashCompute.UpTimeout = options.TiFlash.UpTimeout // Note: if a path of `tikv-server` is specified, the real resolved path of tikv-worker will become `tikv-worker` in the same directory. 
defaultInt(&options.TiKVWorker.Num, "tikv.worker", 1) defaultStr(&options.TiKVWorker.BinPath, "tikv.worker.binpath", options.TiKV.BinPath) default: return errors.Errorf("Unknown --mode %s", options.ShOpt.Mode) } switch options.ShOpt.PDMode { case "pd": defaultInt(&options.PD.Num, "pd", 1) case "ms": defaultInt(&options.PD.Num, "pd", 1) defaultStr(&options.PD.BinPath, "pd.binpath", options.PD.BinPath) defaultStr(&options.PD.ConfigPath, "pd.config", options.PD.ConfigPath) defaultInt(&options.TSO.Num, "tso", 1) defaultStr(&options.TSO.BinPath, "tso.binpath", options.PD.BinPath) defaultStr(&options.TSO.ConfigPath, "tso.config", options.PD.ConfigPath) defaultInt(&options.Scheduling.Num, "scheduling", 1) defaultStr(&options.Scheduling.BinPath, "scheduling.binpath", options.PD.BinPath) defaultStr(&options.Scheduling.ConfigPath, "scheduling.config", options.PD.ConfigPath) default: return errors.Errorf("Unknown --pd.mode %s", options.ShOpt.PDMode) } return nil } func tryConnect(addr string, timeoutSec int) error { conn, err := net.DialTimeout("tcp", addr, time.Duration(timeoutSec)*time.Second) if err != nil { return err } defer conn.Close() return nil } // checkDB check if the addr is connectable by getting a connection from sql.DB. timeout <=0 means no timeout func checkDB(dbAddr string, timeout int) bool { if timeout > 0 { for range timeout { if tryConnect(dbAddr, timeout) == nil { return true } time.Sleep(time.Second) } return false } for { if err := tryConnect(dbAddr, timeout); err == nil { return true } time.Sleep(time.Second) } } // checkStoreStatus uses pd client to check whether a store is up. 
timeout <= 0 means no timeout func checkStoreStatus(pdClient *api.PDClient, storeAddr string, timeout int) bool { if timeout > 0 { for range timeout { if up, err := pdClient.IsUp(storeAddr); err == nil && up { return true } time.Sleep(time.Second) } return false } for { if up, err := pdClient.IsUp(storeAddr); err == nil && up { return true } time.Sleep(time.Second) } } func checkDMMasterStatus(dmMasterClient *api.DMMasterClient, dmMasterAddr string, timeout int) bool { if timeout > 0 { for range timeout { if _, isActive, _, err := dmMasterClient.GetMaster(dmMasterAddr); err == nil && isActive { return true } time.Sleep(time.Second) } return false } for { if _, isActive, _, err := dmMasterClient.GetMaster(dmMasterAddr); err == nil && isActive { return true } time.Sleep(time.Second) } } func hasDashboard(pdAddr string) bool { resp, err := http.Get(fmt.Sprintf("http://%s/dashboard", pdAddr)) if err != nil { return false } defer resp.Body.Close() return resp.StatusCode == 200 } // getAbsolutePath returns the absolute path func getAbsolutePath(path string) (string, error) { if path == "" { return "", nil } if !filepath.IsAbs(path) && !strings.HasPrefix(path, "~/") { wd, err := os.Getwd() if err != nil { return "", err } if wd == "" { return "", errors.New("playground running at non-tiup mode") } path = filepath.Join(wd, path) } if strings.HasPrefix(path, "~/") { usr, err := user.Current() if err != nil { return "", errors.Annotatef(err, "retrieve user home failed") } path = filepath.Join(usr.HomeDir, path[2:]) } absPath, err := filepath.Abs(path) if err != nil { return "", errors.AddStack(err) } return absPath, nil } func dumpPort(fname string, port int) error { return utils.WriteFile(fname, []byte(strconv.Itoa(port)), 0o644) } func loadPort(dir string) (port int, err error) { data, err := os.ReadFile(filepath.Join(dir, "port")) if err != nil { return 0, err } port, err = strconv.Atoi(string(data)) return } func dumpDSN(fname string, dbs []*instance.TiDBInstance, tdbs 
[]*instance.TiProxy) { var dsn []string for _, db := range dbs { dsn = append(dsn, fmt.Sprintf("mysql://root@%s", db.Addr())) } for _, tdb := range tdbs { dsn = append(dsn, fmt.Sprintf("mysql://root@%s", tdb.Addr())) } _ = utils.WriteFile(fname, []byte(strings.Join(dsn, "\n")), 0o644) } func newEtcdClient(endpoint string) (*clientv3.Client, error) { // Because etcd client does not support setting logger directly, // the configuration of pingcap/log is copied here. zapCfg := zap.NewProductionConfig() zapCfg.OutputPaths = []string{"stderr"} zapCfg.ErrorOutputPaths = []string{"stderr"} client, err := clientv3.New(clientv3.Config{ Endpoints: []string{endpoint}, DialTimeout: 5 * time.Second, LogConfig: &zapCfg, }) if err != nil { return nil, err } return client, nil } func main() { code := 0 err := execute() if err != nil { fmt.Println(color.RedString("Error: %v", err)) code = 1 } removeData() if code != 0 { os.Exit(code) } } func removeData() { if deleteWhenExit { os.RemoveAll(dataDir) } } tiup-1.16.3/components/playground/main_test.go000066400000000000000000000021041505422223000214360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main

import (
	"os"
	"strings"
	"testing"
)

// To build:
//   see build_tiup_playground_test in Makefile
// To run:
//   tiup-playground.test -test.coverprofile={file} __DEVEL--i-heard-you-like-tests
//
// TestMain strips `go test` framework flags out of os.Args and, when the
// magic sentinel argument is present, hands control to the real main() so a
// coverage-instrumented playground binary can be driven like the real one.
func TestMain(t *testing.T) {
	var (
		filtered []string
		run      bool
	)
	for _, arg := range os.Args {
		switch {
		case arg == "__DEVEL--i-heard-you-like-tests":
			run = true
		case strings.HasPrefix(arg, "-test"), strings.HasPrefix(arg, "__DEVEL"):
			// drop test-framework arguments before handing os.Args to main()
		default:
			filtered = append(filtered, arg)
		}
	}
	os.Args = filtered
	// fmt.Println(os.Args)
	if run {
		main()
	}
}

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package main import ( "context" "encoding/json" "fmt" "os" "os/exec" "path/filepath" "sync" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/playground/instance" tiupexec "github.com/pingcap/tiup/pkg/exec" "github.com/pingcap/tiup/pkg/utils" ) // ref: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config func (m *monitor) renderSDFile(cid2targets map[string]instance.MetricAddr) error { cid2targets["prometheus"] = instance.MetricAddr{Targets: []string{utils.JoinHostPort(m.host, m.port)}} var items []instance.MetricAddr for id, t := range cid2targets { it := instance.MetricAddr{ Targets: t.Targets, Labels: map[string]string{"job": id}, } for k, v := range t.Labels { it.Labels[k] = v } items = append(items, it) } data, err := json.MarshalIndent(&items, "", "\t") if err != nil { return errors.AddStack(err) } err = utils.WriteFile(m.sdFname, data, 0644) if err != nil { return errors.AddStack(err) } return nil } type monitor struct { host string port int cmd *exec.Cmd sdFname string waitErr error waitOnce sync.Once } func (m *monitor) wait() error { m.waitOnce.Do(func() { m.waitErr = m.cmd.Wait() }) return m.waitErr } // the cmd is not started after return func newMonitor(ctx context.Context, shOpt instance.SharedOptions, version string, host, dir string) (*monitor, error) { if err := utils.MkdirAll(dir, 0755); err != nil { return nil, errors.AddStack(err) } port := utils.MustGetFreePort(host, 9090, shOpt.PortOffset) addr := utils.JoinHostPort(host, port) tmpl := ` global: scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. # scrape_timeout is set to the global default (10s). # Alertmanager configuration alerting: alertmanagers: - static_configs: - targets: # - alertmanager:9093 # Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 
rule_files: # - "first_rules.yml" # - "second_rules.yml" # A scrape configuration containing exactly one endpoint to scrape: # Here it's Prometheus itself. scrape_configs: - job_name: 'cluster' file_sd_configs: - files: - targets.json ` m := new(monitor) m.sdFname = filepath.Join(dir, "targets.json") if err := utils.WriteFile(filepath.Join(dir, "prometheus.yml"), []byte(tmpl), os.ModePerm); err != nil { return nil, errors.AddStack(err) } args := []string{ fmt.Sprintf("--config.file=%s", filepath.Join(dir, "prometheus.yml")), fmt.Sprintf("--web.external-url=http://%s", addr), fmt.Sprintf("--web.listen-address=%s", utils.JoinHostPort(host, port)), fmt.Sprintf("--storage.tsdb.path=%s", filepath.Join(dir, "data")), } var binPath string var err error if binPath, err = tiupexec.PrepareBinary("prometheus", utils.Version(version), binPath); err != nil { return nil, err } cmd := instance.PrepareCommand(ctx, binPath, args, nil, dir) m.port = port m.cmd = cmd m.host = host return m, nil } tiup-1.16.3/components/playground/ngmonitoring.go000066400000000000000000000041731505422223000221750ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "context" "fmt" "os/exec" "path/filepath" "strings" "sync" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/playground/instance" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/utils" ) type ngMonitoring struct { host string port int cmd *exec.Cmd waitErr error waitOnce sync.Once } func (m *ngMonitoring) wait() error { m.waitOnce.Do(func() { m.waitErr = m.cmd.Wait() }) return m.waitErr } // the cmd is not started after return func newNGMonitoring(ctx context.Context, shOpt instance.SharedOptions, version string, host, dir string, pds []*instance.PDInstance) (*ngMonitoring, error) { if err := utils.MkdirAll(dir, 0755); err != nil { return nil, errors.AddStack(err) } port := utils.MustGetFreePort(host, 12020, shOpt.PortOffset) m := new(ngMonitoring) var endpoints []string for _, pd := range pds { endpoints = append(endpoints, utils.JoinHostPort(pd.Host, pd.StatusPort)) } args := []string{ fmt.Sprintf("--pd.endpoints=%s", strings.Join(endpoints, ",")), fmt.Sprintf("--address=%s", utils.JoinHostPort(host, port)), fmt.Sprintf("--advertise-address=%s", utils.JoinHostPort(host, port)), fmt.Sprintf("--storage.path=%s", filepath.Join(dir, "data")), fmt.Sprintf("--log.path=%s", filepath.Join(dir, "logs")), } env := environment.GlobalEnv() binDir, err := env.Profile().ComponentInstalledPath("prometheus", utils.Version(version)) if err != nil { return nil, err } cmd := instance.PrepareCommand(ctx, filepath.Join(binDir, "ng-monitoring-server"), args, nil, dir) m.port = port m.cmd = cmd m.host = host return m, nil } tiup-1.16.3/components/playground/playground.go000066400000000000000000001413671505422223000216560ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "encoding/json" "fmt" "io" "net/http" "os" "os/exec" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" "sync/atomic" "syscall" "time" "slices" "github.com/fatih/color" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/pingcap/errors" "github.com/pingcap/tiup/components/playground/instance" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/environment" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/tui/colorstr" "github.com/pingcap/tiup/pkg/tui/progress" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/mod/semver" "golang.org/x/sync/errgroup" ) // The duration process need to quit gracefully, or we kill the process. const forceKillAfterDuration = time.Second * 10 // Playground represent the playground of a cluster. type Playground struct { dataDir string booted bool // the latest receive signal curSig int32 bootOptions *BootOptions port int pds []*instance.PDInstance tsos []*instance.PDInstance schedulings []*instance.PDInstance tikvs []*instance.TiKVInstance tikvWorkers []*instance.TiKVWorkerInstance tidbs []*instance.TiDBInstance tiflashs []*instance.TiFlashInstance tiproxys []*instance.TiProxy ticdcs []*instance.TiCDC tikvCdcs []*instance.TiKVCDC pumps []*instance.Pump drainers []*instance.Drainer dmMasters []*instance.DMMaster dmWorkers []*instance.DMWorker startedInstances []instance.Instance idAlloc map[string]int instanceWaiter errgroup.Group // not nil iff we start the exec.Cmd successfully. 
// we should and can safely call wait() to make sure the process quit // before playground quit. monitor *monitor ngmonitoring *ngMonitoring grafana *grafana } // MonitorInfo represent the monitor type MonitorInfo struct { IP string `json:"ip"` Port int `json:"port"` BinaryPath string `json:"binary_path"` } // NewPlayground create a Playground instance. func NewPlayground(dataDir string, port int) *Playground { return &Playground{ dataDir: dataDir, port: port, idAlloc: make(map[string]int), } } func (p *Playground) allocID(componentID string) int { id := p.idAlloc[componentID] p.idAlloc[componentID] = id + 1 return id } func (p *Playground) handleDisplay(r io.Writer) (err error) { // TODO add more info. td := utils.NewTableDisplayer(r, []string{"Pid", "Role", "Uptime"}) err = p.WalkInstances(func(componentID string, ins instance.Instance) error { td.AddRow(strconv.Itoa(ins.Pid()), componentID, ins.Uptime()) return nil }) if err != nil { return err } td.Display() return nil } var timeoutOpt = &utils.RetryOption{ Timeout: time.Second * 15, Delay: time.Second * 5, } func (p *Playground) binlogClient() (*api.BinlogClient, error) { var addrs []string for _, inst := range p.pds { addrs = append(addrs, inst.Addr()) } return api.NewBinlogClient(addrs, 5*time.Second, nil) } func (p *Playground) dmMasterClient() *api.DMMasterClient { var addrs []string for _, inst := range p.dmMasters { addrs = append(addrs, inst.Addr()) } return api.NewDMMasterClient(addrs, 5*time.Second, nil) } func (p *Playground) pdClient() *api.PDClient { var addrs []string for _, inst := range p.pds { addrs = append(addrs, inst.Addr()) } return api.NewPDClient( context.WithValue(context.TODO(), logprinter.ContextKeyLogger, log), addrs, 10*time.Second, nil, ) } func (p *Playground) killKVIfTombstone(inst *instance.TiKVInstance) { defer logIfErr(p.renderSDFile()) for { tombstone, err := p.pdClient().IsTombStone(inst.Addr()) if err != nil { fmt.Println(err) } if tombstone { for i, e := range p.tikvs { if 
e == inst { fmt.Printf("stop tombstone tikv %s\n", inst.Addr()) err = syscall.Kill(inst.Pid(), syscall.SIGQUIT) if err != nil { fmt.Println(err) } p.tikvs = slices.Delete(p.tikvs, i, i+1) return } } } time.Sleep(time.Second * 5) } } func (p *Playground) removePumpWhenTombstone(c *api.BinlogClient, inst *instance.Pump) { defer logIfErr(p.renderSDFile()) for { tombstone, err := c.IsPumpTombstone(context.TODO(), inst.Addr()) if err != nil { fmt.Println(err) } if tombstone { for i, e := range p.pumps { if e == inst { fmt.Printf("pump already offline %s\n", inst.Addr()) p.pumps = slices.Delete(p.pumps, i, i+1) return } } } time.Sleep(time.Second * 5) } } func (p *Playground) removeDrainerWhenTombstone(c *api.BinlogClient, inst *instance.Drainer) { defer logIfErr(p.renderSDFile()) for { tombstone, err := c.IsDrainerTombstone(context.TODO(), inst.Addr()) if err != nil { fmt.Println(err) } if tombstone { for i, e := range p.drainers { if e == inst { fmt.Printf("drainer already offline %s\n", inst.Addr()) p.drainers = slices.Delete(p.drainers, i, i+1) return } } } time.Sleep(time.Second * 5) } } func (p *Playground) killTiFlashIfTombstone(inst *instance.TiFlashInstance) { defer logIfErr(p.renderSDFile()) for { tombstone, err := p.pdClient().IsTombStone(inst.Addr()) if err != nil { fmt.Println(err) } if tombstone { for i, e := range p.tiflashs { if e == inst { fmt.Printf("stop tombstone tiflash %s\n", inst.Addr()) err = syscall.Kill(inst.Pid(), syscall.SIGQUIT) if err != nil { fmt.Println(err) } p.tiflashs = slices.Delete(p.tiflashs, i, i+1) return } } } time.Sleep(time.Second * 5) } } func (p *Playground) handleScaleIn(w io.Writer, pid int) error { var cid string var inst instance.Instance err := p.WalkInstances(func(wcid string, winst instance.Instance) error { if winst.Pid() == pid { cid = wcid inst = winst } return nil }) if err != nil { return err } if inst == nil { fmt.Fprintf(w, "no instance with id: %d\n", pid) return nil } switch cid { case spec.ComponentPD: for i 
:= 0; i < len(p.pds); i++ { if p.pds[i].Pid() == pid { inst := p.pds[i] err := p.pdClient().DelPD(inst.Name(), timeoutOpt) if err != nil { return err } p.pds = slices.Delete(p.pds, i, i+1) } } case spec.ComponentTSO: for i := 0; i < len(p.tsos); i++ { if p.tsos[i].Pid() == pid { p.tsos = slices.Delete(p.tsos, i, i+1) } } case spec.ComponentScheduling: for i := 0; i < len(p.schedulings); i++ { if p.schedulings[i].Pid() == pid { p.schedulings = slices.Delete(p.schedulings, i, i+1) } } case spec.ComponentTiKV: for i := 0; i < len(p.tikvs); i++ { if p.tikvs[i].Pid() == pid { inst := p.tikvs[i] err := p.pdClient().DelStore(inst.Addr(), timeoutOpt) if err != nil { return err } go p.killKVIfTombstone(inst) fmt.Fprintf(w, "tikv will be stop when tombstone\n") return nil } } case spec.ComponentTiDB: for i := 0; i < len(p.tidbs); i++ { if p.tidbs[i].Pid() == pid { p.tidbs = slices.Delete(p.tidbs, i, i+1) } } case spec.ComponentCDC: for i := 0; i < len(p.ticdcs); i++ { if p.ticdcs[i].Pid() == pid { p.ticdcs = slices.Delete(p.ticdcs, i, i+1) } } case spec.ComponentTiProxy: for i := 0; i < len(p.tiproxys); i++ { if p.tiproxys[i].Pid() == pid { p.tiproxys = slices.Delete(p.tiproxys, i, i+1) } } case spec.ComponentTiKVCDC: for i := 0; i < len(p.tikvCdcs); i++ { if p.tikvCdcs[i].Pid() == pid { p.tikvCdcs = slices.Delete(p.tikvCdcs, i, i+1) } } case spec.ComponentTiFlash: for i := 0; i < len(p.tiflashs); i++ { if p.tiflashs[i].Pid() == pid { inst := p.tiflashs[i] err := p.pdClient().DelStore(inst.Addr(), timeoutOpt) if err != nil { return err } go p.killTiFlashIfTombstone(inst) fmt.Fprintf(w, "TiFlash will be stop when tombstone\n") return nil } } case spec.ComponentPump: for i := 0; i < len(p.pumps); i++ { if p.pumps[i].Pid() == pid { inst := p.pumps[i] c, err := p.binlogClient() if err != nil { return err } err = c.OfflinePump(context.TODO(), inst.Addr()) if err != nil { return err } go p.removePumpWhenTombstone(c, inst) fmt.Fprintf(w, "pump will be stop when offline\n") return 
nil } } case spec.ComponentDrainer: for i := 0; i < len(p.drainers); i++ { if p.drainers[i].Pid() == pid { inst := p.drainers[i] c, err := p.binlogClient() if err != nil { return err } err = c.OfflineDrainer(context.TODO(), inst.Addr()) if err != nil { return err } go p.removeDrainerWhenTombstone(c, inst) fmt.Fprintf(w, "drainer will be stop when offline\n") return nil } } case spec.ComponentDMWorker: if err := p.handleScaleInDMWorker(pid); err != nil { return err } case spec.ComponentDMMaster: if err := p.handleScaleInDMMaster(pid); err != nil { return err } default: fmt.Fprintf(w, "unknown component in scale in: %s", cid) return nil } err = syscall.Kill(pid, syscall.SIGQUIT) if err != nil { return errors.AddStack(err) } logIfErr(p.renderSDFile()) fmt.Fprintf(w, "scale in %s success\n", cid) return nil } func (p *Playground) handleScaleInDMWorker(pid int) error { for i := 0; i < len(p.dmWorkers); i++ { if p.dmWorkers[i].Pid() == pid { inst := p.dmWorkers[i] c := p.dmMasterClient() if err := c.OfflineWorker(inst.Name(), nil); err != nil { return err } p.dmWorkers = slices.Delete(p.dmWorkers, i, i+1) return nil } } return nil } func (p *Playground) handleScaleInDMMaster(pid int) error { for i := 0; i < len(p.dmMasters); i++ { if p.dmMasters[i].Pid() == pid { inst := p.dmMasters[i] c := p.dmMasterClient() if err := c.OfflineMaster(inst.Name(), nil); err != nil { return err } p.dmMasters = slices.Delete(p.dmMasters, i, i+1) return nil } } return nil } func (p *Playground) sanitizeConfig(boot instance.Config, cfg *instance.Config) error { if cfg.BinPath == "" { cfg.BinPath = boot.BinPath } if cfg.ConfigPath == "" { cfg.ConfigPath = boot.ConfigPath } if cfg.Host == "" { cfg.Host = boot.Host } path, err := getAbsolutePath(cfg.ConfigPath) if err != nil { return err } cfg.ConfigPath = path return nil } func (p *Playground) sanitizeComponentConfig(cid string, cfg *instance.Config) error { switch cid { case spec.ComponentPD: return p.sanitizeConfig(p.bootOptions.PD, cfg) 
case spec.ComponentTSO: return p.sanitizeConfig(p.bootOptions.TSO, cfg) case spec.ComponentScheduling: return p.sanitizeConfig(p.bootOptions.Scheduling, cfg) case spec.ComponentTiKV: return p.sanitizeConfig(p.bootOptions.TiKV, cfg) case spec.ComponentTiKVWorker: return p.sanitizeConfig(p.bootOptions.TiKVWorker, cfg) case spec.ComponentTiDB: return p.sanitizeConfig(p.bootOptions.TiDB, cfg) case spec.ComponentTiFlash: return p.sanitizeConfig(p.bootOptions.TiFlash, cfg) case spec.ComponentCDC: return p.sanitizeConfig(p.bootOptions.TiCDC, cfg) case spec.ComponentTiKVCDC: return p.sanitizeConfig(p.bootOptions.TiKVCDC, cfg) case spec.ComponentPump: return p.sanitizeConfig(p.bootOptions.Pump, cfg) case spec.ComponentDrainer: return p.sanitizeConfig(p.bootOptions.Drainer, cfg) case spec.ComponentTiProxy: return p.sanitizeConfig(p.bootOptions.TiProxy, cfg) case spec.ComponentDMMaster: return p.sanitizeConfig(p.bootOptions.DMMaster, cfg) case spec.ComponentDMWorker: return p.sanitizeConfig(p.bootOptions.DMWorker, cfg) default: return fmt.Errorf("unknown %s in sanitizeConfig", cid) } } func (p *Playground) startInstance(ctx context.Context, inst instance.Instance) error { var version utils.Version var err error boundVersion := p.bindVersion(inst.Component(), p.bootOptions.Version) component := inst.Component() if component == "tso" || component == "scheduling" { component = string(instance.PDRoleNormal) } if component == "tikv_worker" { component = "tikv" } if version, err = environment.GlobalEnv().V1Repository().ResolveComponentVersion(component, boundVersion); err != nil { return err } if err := inst.PrepareBinary(component, inst.Component(), version); err != nil { return err } if err = inst.Start(ctx); err != nil { return err } p.addWaitInstance(inst) return nil } func (p *Playground) addWaitInstance(inst instance.Instance) { p.startedInstances = append(p.startedInstances, inst) p.instanceWaiter.Go(func() error { err := inst.Wait() if err != nil && 
atomic.LoadInt32(&p.curSig) == 0 { fmt.Print(color.RedString("%s quit: %s\n", inst.Component(), err.Error())) if lines, _ := utils.TailN(inst.LogFile(), 10); len(lines) > 0 { for _, line := range lines { fmt.Println(line) } fmt.Print(color.YellowString("...\ncheck detail log from: %s\n", inst.LogFile())) } } else { fmt.Printf("%s quit\n", inst.Component()) } return err }) } func (p *Playground) handleScaleOut(w io.Writer, cmd *Command) error { // Ignore Config.Num, always one command as scale out one instance. err := p.sanitizeComponentConfig(cmd.ComponentID, &cmd.Config) if err != nil { return err } // TODO: Support scale-out in CSE mode inst, err := p.addInstance(cmd.ComponentID, instance.PDRoleNormal, instance.TiFlashRoleNormal, cmd.Config) if err != nil { return err } err = p.startInstance( context.WithValue(context.TODO(), logprinter.ContextKeyLogger, log), inst, ) if err != nil { return err } mysql := mysqlCommand() if cmd.ComponentID == "tidb" { addr := p.tidbs[len(p.tidbs)-1].Addr() if checkDB(addr, cmd.UpTimeout) { ss := strings.Split(addr, ":") connectMsg := "To connect new added TiDB: %s --host %s --port %s -u root -p (no password)" fmt.Println(color.GreenString(connectMsg, mysql, ss[0], ss[1])) fmt.Fprintln(w, color.GreenString(connectMsg, mysql, ss[0], ss[1])) } } if cmd.ComponentID == "tiproxy" { addr := p.tiproxys[len(p.tidbs)-1].Addr() if checkDB(addr, cmd.UpTimeout) { ss := strings.Split(addr, ":") connectMsg := "To connect to the newly added TiProxy: %s --host %s --port %s -u root -p (no password)" fmt.Println(color.GreenString(connectMsg, mysql, ss[0], ss[1])) fmt.Fprintln(w, color.GreenString(connectMsg, mysql, ss[0], ss[1])) } } logIfErr(p.renderSDFile()) return nil } func (p *Playground) handleCommand(cmd *Command, w io.Writer) error { fmt.Printf("receive command: %s\n", cmd.CommandType) switch cmd.CommandType { case DisplayCommandType: return p.handleDisplay(w) case ScaleInCommandType: return p.handleScaleIn(w, cmd.PID) case 
ScaleOutCommandType: return p.handleScaleOut(w, cmd) } return nil } func (p *Playground) listenAndServeHTTP() error { http.HandleFunc("/command", p.commandHandler) return http.ListenAndServe(":"+strconv.Itoa(p.port), nil) } func (p *Playground) commandHandler(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { w.WriteHeader(http.StatusMethodNotAllowed) return } data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(403) fmt.Fprintln(w, err) return } cmd := new(Command) err = json.Unmarshal(data, cmd) if err != nil { w.WriteHeader(403) fmt.Fprintln(w, err) return } // Mapping command line component id to internal spec component id. if cmd.ComponentID == "ticdc" { cmd.ComponentID = spec.ComponentCDC } err = p.handleCommand(cmd, w) if err != nil { w.WriteHeader(403) fmt.Fprintln(w, err) } } // RWalkInstances work like WalkInstances, but in the reverse order. func (p *Playground) RWalkInstances(fn func(componentID string, ins instance.Instance) error) error { var ids []string var instances []instance.Instance _ = p.WalkInstances(func(id string, ins instance.Instance) error { ids = append(ids, id) instances = append(instances, ins) return nil }) for i := len(ids); i > 0; i-- { err := fn(ids[i-1], instances[i-1]) if err != nil { return err } } return nil } // WalkInstances call fn for every instance and stop if return not nil. 
func (p *Playground) WalkInstances(fn func(componentID string, ins instance.Instance) error) error { for _, ins := range p.pds { err := fn(spec.ComponentPD, ins) if err != nil { return err } } for _, ins := range p.tsos { err := fn(spec.ComponentTSO, ins) if err != nil { return err } } for _, ins := range p.schedulings { err := fn(spec.ComponentScheduling, ins) if err != nil { return err } } for _, ins := range p.tikvs { err := fn(spec.ComponentTiKV, ins) if err != nil { return err } } for _, ins := range p.tikvWorkers { err := fn(spec.ComponentTiKVWorker, ins) if err != nil { return err } } for _, ins := range p.pumps { err := fn(spec.ComponentPump, ins) if err != nil { return err } } for _, ins := range p.tidbs { err := fn(spec.ComponentTiDB, ins) if err != nil { return err } } for _, ins := range p.tiproxys { err := fn(spec.ComponentTiProxy, ins) if err != nil { return err } } for _, ins := range p.ticdcs { err := fn(spec.ComponentCDC, ins) if err != nil { return err } } for _, ins := range p.tikvCdcs { err := fn(spec.ComponentTiKVCDC, ins) if err != nil { return err } } for _, ins := range p.drainers { err := fn(spec.ComponentDrainer, ins) if err != nil { return err } } for _, ins := range p.tiflashs { err := fn(spec.ComponentTiFlash, ins) if err != nil { return err } } for _, ins := range p.dmMasters { err := fn(spec.ComponentDMMaster, ins) if err != nil { return err } } for _, ins := range p.dmWorkers { err := fn(spec.ComponentDMWorker, ins) if err != nil { return err } } return nil } func (p *Playground) enableBinlog() bool { return p.bootOptions.Pump.Num > 0 } func (p *Playground) addInstance(componentID string, pdRole instance.PDRole, tiflashRole instance.TiFlashRole, cfg instance.Config) (ins instance.Instance, err error) { if cfg.BinPath != "" { cfg.BinPath, err = getAbsolutePath(cfg.BinPath) if err != nil { return nil, err } } if cfg.ConfigPath != "" { cfg.ConfigPath, err = getAbsolutePath(cfg.ConfigPath) if err != nil { return nil, err } } dataDir := 
p.dataDir id := p.allocID(componentID) dir := filepath.Join(dataDir, fmt.Sprintf("%s-%d", componentID, id)) if componentID == string(instance.PDRoleNormal) && (pdRole != instance.PDRoleNormal && pdRole != instance.PDRoleAPI) { id = p.allocID(string(pdRole)) dir = filepath.Join(dataDir, fmt.Sprintf("%s-%d", pdRole, id)) } if err = utils.MkdirAll(dir, 0755); err != nil { return nil, err } // look more like listen ip? host := p.bootOptions.Host if cfg.Host != "" { host = cfg.Host } switch componentID { case spec.ComponentPD: inst := instance.NewPDInstance(pdRole, p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, p.pds, cfg.Port, p.bootOptions.TiKV.Num == 1) ins = inst if pdRole == instance.PDRoleNormal || pdRole == instance.PDRoleAPI { if p.booted { inst.Join(p.pds) p.pds = append(p.pds, inst) } else { p.pds = append(p.pds, inst) for _, pd := range p.pds { pd.InitCluster(p.pds) } } } else if pdRole == instance.PDRoleTSO { p.tsos = append(p.tsos, inst) } else if pdRole == instance.PDRoleScheduling { p.schedulings = append(p.schedulings, inst) } case spec.ComponentTSO: inst := instance.NewPDInstance(instance.PDRoleTSO, p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, p.pds, cfg.Port, p.bootOptions.TiKV.Num == 1) ins = inst p.tsos = append(p.tsos, inst) case spec.ComponentScheduling: inst := instance.NewPDInstance(instance.PDRoleScheduling, p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, p.pds, cfg.Port, p.bootOptions.TiKV.Num == 1) ins = inst p.schedulings = append(p.schedulings, inst) case spec.ComponentTiDB: inst := instance.NewTiDBInstance(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, cfg.Port, p.pds, dataDir, p.enableBinlog()) ins = inst p.tidbs = append(p.tidbs, inst) case spec.ComponentTiKV: inst := instance.NewTiKVInstance(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, cfg.Port, p.pds, p.tsos) ins = inst p.tikvs = append(p.tikvs, inst) case spec.ComponentTiKVWorker: inst := 
instance.NewTiKVWorkerInstance(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, cfg.Port, p.pds) ins = inst p.tikvWorkers = append(p.tikvWorkers, inst) case spec.ComponentTiFlash: inst := instance.NewTiFlashInstance(tiflashRole, p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, p.pds, p.tidbs, cfg.Version) ins = inst p.tiflashs = append(p.tiflashs, inst) case spec.ComponentTiProxy: if err := instance.GenTiProxySessionCerts(dataDir); err != nil { return nil, err } inst := instance.NewTiProxy(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, cfg.Port, p.pds) ins = inst p.tiproxys = append(p.tiproxys, inst) case spec.ComponentCDC: inst := instance.NewTiCDC(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, cfg.Port, p.pds) ins = inst p.ticdcs = append(p.ticdcs, inst) case spec.ComponentTiKVCDC: inst := instance.NewTiKVCDC(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, p.pds) ins = inst p.tikvCdcs = append(p.tikvCdcs, inst) case spec.ComponentPump: inst := instance.NewPump(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, p.pds) ins = inst p.pumps = append(p.pumps, inst) case spec.ComponentDrainer: inst := instance.NewDrainer(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, p.pds) ins = inst p.drainers = append(p.drainers, inst) case spec.ComponentDMMaster: inst := instance.NewDMMaster(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, cfg.Port) ins = inst p.dmMasters = append(p.dmMasters, inst) for _, master := range p.dmMasters { master.SetInitEndpoints(p.dmMasters) } case spec.ComponentDMWorker: inst := instance.NewDMWorker(p.bootOptions.ShOpt, cfg.BinPath, dir, host, cfg.ConfigPath, id, cfg.Port, p.dmMasters) ins = inst p.dmWorkers = append(p.dmWorkers, inst) default: return nil, errors.Errorf("unknown component: %s", componentID) } return } func (p *Playground) waitAllDBUp() ([]string, []string) { var tidbSucc []string var tiproxySucc []string 
if len(p.tidbs) > 0 { var wg sync.WaitGroup var tidbMu, tiproxyMu sync.Mutex var bars *progress.MultiBar if len(p.tiproxys) > 0 { bars = progress.NewMultiBar(colorstr.Sprintf("[dark_gray]Waiting for tidb and tiproxy instances ready")) } else { bars = progress.NewMultiBar(colorstr.Sprintf("[dark_gray]Waiting for tidb instances ready")) } for _, db := range p.tidbs { wg.Add(1) prefix := "- TiDB: " + db.Addr() bar := bars.AddBar(prefix) go func(dbInst *instance.TiDBInstance) { defer wg.Done() if s := checkDB(dbInst.Addr(), options.TiDB.UpTimeout); s { { tidbMu.Lock() tidbSucc = append(tidbSucc, dbInst.Addr()) tidbMu.Unlock() } bar.UpdateDisplay(&progress.DisplayProps{ Prefix: prefix, Mode: progress.ModeDone, }) } else { bar.UpdateDisplay(&progress.DisplayProps{ Prefix: prefix, Mode: progress.ModeError, }) } }(db) } for _, db := range p.tiproxys { wg.Add(1) prefix := "- TiProxy: " + db.Addr() bar := bars.AddBar(prefix) go func(dbInst *instance.TiProxy) { defer wg.Done() if s := checkDB(dbInst.Addr(), options.TiProxy.UpTimeout); s { { tiproxyMu.Lock() tiproxySucc = append(tiproxySucc, dbInst.Addr()) tiproxyMu.Unlock() } bar.UpdateDisplay(&progress.DisplayProps{ Prefix: prefix, Mode: progress.ModeDone, }) } else { bar.UpdateDisplay(&progress.DisplayProps{ Prefix: prefix, Mode: progress.ModeError, }) } }(db) } bars.StartRenderLoop() wg.Wait() bars.StopRenderLoop() } return tidbSucc, tiproxySucc } func (p *Playground) waitAllTiFlashUp() { if len(p.tiflashs) > 0 { var endpoints []string for _, pd := range p.pds { endpoints = append(endpoints, pd.Addr()) } pdClient := api.NewPDClient( context.WithValue(context.TODO(), logprinter.ContextKeyLogger, log), endpoints, 10*time.Second, nil, ) var wg sync.WaitGroup bars := progress.NewMultiBar(colorstr.Sprintf("[dark_gray]Waiting for tiflash instances ready")) for _, flash := range p.tiflashs { wg.Add(1) tiflashKindName := "TiFlash" if flash.Role == instance.TiFlashRoleDisaggCompute { tiflashKindName = "TiFlash (CN)" } else if 
flash.Role == instance.TiFlashRoleDisaggWrite { tiflashKindName = "TiFlash (WN)" } prefix := fmt.Sprintf("- %s: %s", tiflashKindName, flash.Addr()) bar := bars.AddBar(prefix) go func(flashInst *instance.TiFlashInstance) { defer wg.Done() displayResult := &progress.DisplayProps{ Prefix: prefix, } if cmd := flashInst.Cmd(); cmd == nil { displayResult.Mode = progress.ModeError displayResult.Suffix = "initialize command failed" } else if state := cmd.ProcessState; state != nil && state.Exited() { displayResult.Mode = progress.ModeError displayResult.Suffix = fmt.Sprintf("process exited with code: %d", state.ExitCode()) } else if s := checkStoreStatus(pdClient, flashInst.Addr(), options.TiFlash.UpTimeout); !s { displayResult.Mode = progress.ModeError displayResult.Suffix = "failed to up after timeout" } else { displayResult.Mode = progress.ModeDone } bar.UpdateDisplay(displayResult) }(flash) } bars.StartRenderLoop() wg.Wait() bars.StopRenderLoop() } } func (p *Playground) waitAllDMMasterUp() { if len(p.dmMasters) > 0 { var wg sync.WaitGroup bars := progress.NewMultiBar(colorstr.Sprintf("[dark_gray]Waiting for dm-master instances ready")) for _, master := range p.dmMasters { wg.Add(1) prefix := master.Addr() bar := bars.AddBar(prefix) go func(masterInst *instance.DMMaster) { defer wg.Done() displayResult := &progress.DisplayProps{ Prefix: prefix, } if cmd := masterInst.Cmd(); cmd == nil { displayResult.Mode = progress.ModeError displayResult.Suffix = "initialize command failed" } else if state := cmd.ProcessState; state != nil && state.Exited() { displayResult.Mode = progress.ModeError displayResult.Suffix = fmt.Sprintf("process exited with code: %d", state.ExitCode()) } else if s := checkDMMasterStatus(p.dmMasterClient(), masterInst.Name(), options.DMMaster.UpTimeout); !s { displayResult.Mode = progress.ModeError displayResult.Suffix = "failed to up after timeout" } else { displayResult.Mode = progress.ModeDone } bar.UpdateDisplay(displayResult) }(master) } 
bars.StartRenderLoop() wg.Wait() bars.StopRenderLoop() } } func (p *Playground) bindVersion(comp string, version string) (bindVersion string) { bindVersion = version switch comp { case spec.ComponentTiKVCDC: bindVersion = p.bootOptions.TiKVCDC.Version case spec.ComponentTiProxy: bindVersion = p.bootOptions.TiProxy.Version default: } return } //revive:disable:cognitive-complexity //revive:disable:error-strings func (p *Playground) bootCluster(ctx context.Context, env *environment.Environment, options *BootOptions) error { for _, cfg := range []*instance.Config{ &options.PD, &options.TSO, &options.Scheduling, &options.TiProxy, &options.TiDB, &options.TiKV, &options.TiKVWorker, &options.TiFlash, &options.TiFlashCompute, &options.TiFlashWrite, &options.Pump, &options.Drainer, &options.TiKVCDC, &options.DMMaster, &options.DMWorker, } { path, err := getAbsolutePath(cfg.ConfigPath) if err != nil { return errors.Annotatef(err, "cannot eval absolute directory: %s", cfg.ConfigPath) } cfg.ConfigPath = path } p.bootOptions = options // All others components depend on the pd except dm, we just ensure the pd count must be great than 0 if options.ShOpt.PDMode != "ms" && options.PD.Num < 1 && options.DMMaster.Num < 1 { return fmt.Errorf("all components count must be great than 0 (pd=%v)", options.PD.Num) } if options.ShOpt.Mode != "tidb-cse" { if options.TiKVWorker.Num > 0 { return fmt.Errorf("TiKV worker is only supported in tidb-cse mode") } } if options.ShOpt.Mode == "tidb-cse" { if options.TiKVWorker.Num > 1 { return fmt.Errorf("TiKV worker only supports at most 1 instance") } } if !utils.Version(options.Version).IsNightly() { if semver.Compare(options.Version, "v3.1.0") < 0 && options.TiFlash.Num != 0 { fmt.Println(color.YellowString("Warning: current version %s doesn't support TiFlash", options.Version)) options.TiFlash.Num = 0 } else if runtime.GOOS == "darwin" && semver.Compare(options.Version, "v4.0.0") < 0 { // only runs tiflash on version later than v4.0.0 when 
executing on darwin fmt.Println(color.YellowString("Warning: current version %s doesn't support TiFlash on darwin", options.Version)) options.TiFlash.Num = 0 } } type InstancePair struct { comp string pdRole instance.PDRole tiflashRole instance.TiFlashRole instance.Config } instances := []InstancePair{ {spec.ComponentTiProxy, "", "", options.TiProxy}, {spec.ComponentTiKV, "", "", options.TiKV}, {spec.ComponentPump, "", "", options.Pump}, {spec.ComponentTiDB, "", "", options.TiDB}, {spec.ComponentCDC, "", "", options.TiCDC}, {spec.ComponentTiKVCDC, "", "", options.TiKVCDC}, {spec.ComponentDrainer, "", "", options.Drainer}, {spec.ComponentDMMaster, "", "", options.DMMaster}, {spec.ComponentDMWorker, "", "", options.DMWorker}, } if options.ShOpt.Mode == "tidb" { instances = append(instances, InstancePair{spec.ComponentTiFlash, instance.PDRoleNormal, instance.TiFlashRoleNormal, options.TiFlash}, ) } else if options.ShOpt.Mode == "tidb-cse" || options.ShOpt.Mode == "tiflash-disagg" { if !tidbver.TiFlashPlaygroundNewStartMode(options.Version) { // For simplicity, currently we only implemented disagg mode when TiFlash can run without config. return fmt.Errorf("TiUP playground only supports CSE/Disagg mode for TiDB cluster >= v7.1.0 (or nightly)") } if !strings.HasPrefix(options.ShOpt.CSE.S3Endpoint, "https://") && !strings.HasPrefix(options.ShOpt.CSE.S3Endpoint, "http://") { return fmt.Errorf("CSE/Disagg mode requires S3 endpoint to start with http:// or https://") } isSecure := strings.HasPrefix(options.ShOpt.CSE.S3Endpoint, "https://") rawEndpoint := strings.TrimPrefix(options.ShOpt.CSE.S3Endpoint, "https://") rawEndpoint = strings.TrimPrefix(rawEndpoint, "http://") // Currently we always assign region=local. Other regions are not supported. if strings.Contains(rawEndpoint, "amazonaws.com") { return fmt.Errorf("Currently TiUP playground CSE/Disagg mode only supports local S3 (like minio). S3 on AWS Regions are not supported. 
Contributions are welcome!") } // Preflight check whether specified object storage is available. s3Client, err := minio.New(rawEndpoint, &minio.Options{ Creds: credentials.NewStaticV4(options.ShOpt.CSE.AccessKey, options.ShOpt.CSE.SecretKey, ""), Secure: isSecure, }) if err != nil { return errors.Annotate(err, "CSE/Disagg mode preflight check failed") } ctxCheck, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() bucketExists, err := s3Client.BucketExists(ctxCheck, options.ShOpt.CSE.Bucket) if err != nil { return errors.Annotate(err, "CSE/Disagg mode preflight check failed") } if !bucketExists { // Try to create bucket. err := s3Client.MakeBucket(ctxCheck, options.ShOpt.CSE.Bucket, minio.MakeBucketOptions{}) if err != nil { return fmt.Errorf("CSE/Disagg mode preflight check failed: Bucket %s doesn't exist and fail to create automatically (your bucket name may be invalid?)", options.ShOpt.CSE.Bucket) } } instances = append( instances, InstancePair{spec.ComponentTiFlash, instance.PDRoleNormal, instance.TiFlashRoleDisaggWrite, options.TiFlashWrite}, InstancePair{spec.ComponentTiFlash, instance.PDRoleNormal, instance.TiFlashRoleDisaggCompute, options.TiFlashCompute}, ) } if options.ShOpt.Mode == "tidb-cse" { instances = append( instances, InstancePair{comp: spec.ComponentTiKVWorker, Config: options.TiKVWorker}, ) } if options.ShOpt.PDMode == "pd" { instances = append([]InstancePair{{spec.ComponentPD, instance.PDRoleNormal, instance.TiFlashRoleNormal, options.PD}}, instances..., ) } else if options.ShOpt.PDMode == "ms" { if !tidbver.PDSupportMicroservices(options.Version) { return fmt.Errorf("PD cluster doesn't support microservices mode in version %s", options.Version) } instances = append([]InstancePair{ {spec.ComponentPD, instance.PDRoleAPI, instance.TiFlashRoleNormal, options.PD}, {spec.ComponentPD, instance.PDRoleTSO, instance.TiFlashRoleNormal, options.TSO}, {spec.ComponentPD, instance.PDRoleScheduling, instance.TiFlashRoleNormal, 
options.Scheduling}}, instances..., ) } for _, inst := range instances { for i := 0; i < inst.Num; i++ { _, err := p.addInstance(inst.comp, inst.pdRole, inst.tiflashRole, inst.Config) if err != nil { return err } } } anyPumpReady := false allDMMasterReady := false // Start all instance except tiflash. err := p.WalkInstances(func(cid string, ins instance.Instance) error { if cid == spec.ComponentTiFlash { return nil } // wait dm-master up before dm-worker if cid == spec.ComponentDMWorker && !allDMMasterReady { p.waitAllDMMasterUp() allDMMasterReady = true } err := p.startInstance(ctx, ins) if err != nil { return err } // if no any pump, tidb will quit right away. if cid == spec.ComponentPump && !anyPumpReady { ctx, cancel := context.WithTimeout(context.TODO(), time.Second*120) err = ins.(*instance.Pump).Ready(ctx) cancel() if err != nil { return err } anyPumpReady = true } return nil }) if err != nil { return err } p.booted = true tidbSucc, tiproxySucc := p.waitAllDBUp() var monitorInfo *MonitorInfo if options.Monitor { var err error p.monitor, monitorInfo, err = p.bootMonitor(ctx, env) if err != nil { return err } p.ngmonitoring, err = p.bootNGMonitoring(ctx, env) if err != nil { return err } p.grafana, err = p.bootGrafana(ctx, env, monitorInfo) if err != nil { return err } } colorCmd := color.New(color.FgHiCyan, color.Bold) if len(tidbSucc) > 0 { // start TiFlash after at least one TiDB is up. var started []*instance.TiFlashInstance for _, flash := range p.tiflashs { if err := p.startInstance(ctx, flash); err != nil { fmt.Println(color.RedString("TiFlash %s failed to start: %s", flash.Addr(), err)) } else { started = append(started, flash) } } p.tiflashs = started p.waitAllTiFlashUp() fmt.Println() color.New(color.FgGreen, color.Bold).Println("🎉 TiDB Playground Cluster is started, enjoy!") if deleteWhenExit { fmt.Println() colorstr.Printf("[yellow][bold]Warning[reset][bold]: cluster data will be destroyed after exit. 
To persist data after exit, specify [tiup_command]--tag [reset].\n") } fmt.Println() mysql := mysqlCommand() for _, dbAddr := range tidbSucc { ss := strings.Split(dbAddr, ":") fmt.Printf("Connect TiDB: ") colorCmd.Printf("%s --host %s --port %s -u root\n", mysql, ss[0], ss[1]) } for _, dbAddr := range tiproxySucc { ss := strings.Split(dbAddr, ":") fmt.Printf("Connect TiProxy: ") colorCmd.Printf("%s --host %s --port %s -u root\n", mysql, ss[0], ss[1]) } } if len(p.dmMasters) > 0 { fmt.Printf("Connect DM: ") endpoints := make([]string, 0, len(p.dmMasters)) for _, dmMaster := range p.dmMasters { endpoints = append(endpoints, dmMaster.Addr()) } colorCmd.Printf("tiup dmctl --master-addr %s\n", strings.Join(endpoints, ",")) } if len(p.pds) > 0 { if pdAddr := p.pds[0].Addr(); len(p.tidbs) > 0 && hasDashboard(pdAddr) { fmt.Printf("TiDB Dashboard: ") colorCmd.Printf("http://%s/dashboard\n", pdAddr) } } if p.bootOptions.ShOpt.Mode == "tikv-slim" { if p.bootOptions.ShOpt.PDMode == "ms" { var ( tsoAddr []string apiAddr []string schedulingAddr []string ) for _, api := range p.pds { apiAddr = append(apiAddr, api.Addr()) } for _, tso := range p.tsos { tsoAddr = append(tsoAddr, tso.Addr()) } for _, scheduling := range p.schedulings { schedulingAddr = append(schedulingAddr, scheduling.Addr()) } fmt.Printf("PD API Endpoints: ") colorCmd.Printf("%s\n", strings.Join(apiAddr, ",")) fmt.Printf("PD TSO Endpoints: ") colorCmd.Printf("%s\n", strings.Join(tsoAddr, ",")) fmt.Printf("PD Scheduling Endpoints: ") colorCmd.Printf("%s\n", strings.Join(schedulingAddr, ",")) } else { var pdAddrs []string for _, pd := range p.pds { pdAddrs = append(pdAddrs, pd.Addr()) } fmt.Printf("PD Endpoints: ") colorCmd.Printf("%s\n", strings.Join(pdAddrs, ",")) } } if monitorInfo != nil { p.updateMonitorTopology(spec.ComponentPrometheus, *monitorInfo) } dumpDSN(filepath.Join(p.dataDir, "dsn"), p.tidbs, p.tiproxys) go func() { // fmt.Printf("serve at :%d\n", p.port) err := p.listenAndServeHTTP() if err != nil { 
fmt.Printf("listenAndServeHTTP quit: %s\n", err) } }() logIfErr(p.renderSDFile()) if g := p.grafana; g != nil { p.updateMonitorTopology(spec.ComponentGrafana, MonitorInfo{g.host, g.port, g.cmd.Path}) fmt.Printf("Grafana: ") colorCmd.Printf("http://%s\n", utils.JoinHostPort(g.host, g.port)) } return nil } func (p *Playground) updateMonitorTopology(componentID string, info MonitorInfo) { info.IP = instance.AdvertiseHost(info.IP) if len(p.pds) == 0 { return } client, err := newEtcdClient(p.pds[0].Addr()) if err == nil && client != nil { if promBinary, err := json.Marshal(info); err == nil { _, err = client.Put(context.TODO(), "/topology/"+componentID, string(promBinary)) if err != nil { fmt.Println("Set the PD metrics storage failed") } } } } // Wait all instance quit and return the first non-nil err. // including p8s & grafana func (p *Playground) wait() error { err := p.instanceWaiter.Wait() if err != nil && atomic.LoadInt32(&p.curSig) == 0 { return err } return nil } func (p *Playground) terminate(sig syscall.Signal) { kill := func(name string, pid int, wait func() error) { if sig == syscall.SIGKILL { colorstr.Printf("[dark_gray]Force %s(%d) to quit...\n", name, pid) } else if atomic.LoadInt32(&p.curSig) == int32(sig) { // In case of double ctr+c colorstr.Printf("[dark_gray]Wait %s(%d) to quit...\n", name, pid) } _ = syscall.Kill(pid, sig) timer := time.AfterFunc(forceKillAfterDuration, func() { _ = syscall.Kill(pid, syscall.SIGKILL) }) _ = wait() timer.Stop() } if p.monitor != nil && p.monitor.cmd != nil && p.monitor.cmd.Process != nil { go kill("prometheus", p.monitor.cmd.Process.Pid, p.monitor.wait) } if p.ngmonitoring != nil && p.ngmonitoring.cmd != nil && p.ngmonitoring.cmd.Process != nil { go kill("ng-monitoring", p.ngmonitoring.cmd.Process.Pid, p.ngmonitoring.wait) } if p.grafana != nil && p.grafana.cmd != nil && p.grafana.cmd.Process != nil { go kill("grafana", p.grafana.cmd.Process.Pid, p.grafana.wait) } for _, inst := range p.tikvWorkers { if inst.Process 
!= nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.dmWorkers { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.dmMasters { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.tiflashs { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.ticdcs { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.tikvCdcs { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.drainers { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } // tidb must exit earlier then pd for _, inst := range p.tidbs { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.pumps { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.tikvs { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.pds { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.tsos { if inst.Process != nil && inst.Process.Cmd() != nil && 
inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.schedulings { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } for _, inst := range p.tiproxys { if inst.Process != nil && inst.Process.Cmd() != nil && inst.Process.Cmd().Process != nil { kill(inst.Component(), inst.Pid(), inst.Wait) } } } func (p *Playground) renderSDFile() error { // we not start monitor at all. if p.monitor == nil { return nil } cid2targets := make(map[string]instance.MetricAddr) _ = p.WalkInstances(func(cid string, inst instance.Instance) error { v := inst.MetricAddr() t, ok := cid2targets[inst.Component()] if ok { v.Targets = append(v.Targets, t.Targets...) } cid2targets[inst.Component()] = v return nil }) err := p.monitor.renderSDFile(cid2targets) if err != nil { return err } return nil } // return not error iff the Cmd is started successfully. // user must and can safely wait the Cmd func (p *Playground) bootMonitor(ctx context.Context, env *environment.Environment) (*monitor, *MonitorInfo, error) { options := p.bootOptions monitorInfo := &MonitorInfo{} dataDir := p.dataDir promDir := filepath.Join(dataDir, "prometheus") monitor, err := newMonitor(ctx, options.ShOpt, options.Version, options.Host, promDir) if err != nil { return nil, nil, err } monitorInfo.IP = instance.AdvertiseHost(options.Host) monitorInfo.BinaryPath = promDir monitorInfo.Port = monitor.port // start the monitor cmd. 
log, err := os.OpenFile(filepath.Join(promDir, "prom.log"), os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.ModePerm) if err != nil { return nil, nil, errors.AddStack(err) } defer log.Close() monitor.cmd.Stderr = log monitor.cmd.Stdout = os.Stdout if err := monitor.cmd.Start(); err != nil { return nil, nil, err } p.instanceWaiter.Go(func() error { err := monitor.wait() if err != nil && atomic.LoadInt32(&p.curSig) == 0 { fmt.Printf("Prometheus quit: %v\n", err) } else { fmt.Println("prometheus quit") } return err }) return monitor, monitorInfo, nil } // return not error iff the Cmd is started successfully. // user must and can safely wait the Cmd func (p *Playground) bootNGMonitoring(ctx context.Context, env *environment.Environment) (*ngMonitoring, error) { options := p.bootOptions dataDir := p.dataDir promDir := filepath.Join(dataDir, "prometheus") ngm, err := newNGMonitoring(ctx, options.ShOpt, options.Version, options.Host, promDir, p.pds) if err != nil { return nil, err } // ng-monitoring only exists when tidb >= 5.3.0 _, err = os.Stat(ngm.cmd.Path) if os.IsNotExist(err) { return nil, nil } if err := ngm.cmd.Start(); err != nil { return nil, err } p.instanceWaiter.Go(func() error { err := ngm.wait() if err != nil && atomic.LoadInt32(&p.curSig) == 0 { fmt.Printf("ng-monitoring quit: %v\n", err) } else { fmt.Println("ng-monitoring quit") } return err }) return ngm, nil } // return not error iff the Cmd is started successfully. 
func (p *Playground) bootGrafana(ctx context.Context, env *environment.Environment, monitorInfo *MonitorInfo) (*grafana, error) { // set up grafana options := p.bootOptions if err := installIfMissing("grafana", options.Version); err != nil { return nil, err } installPath, err := env.Profile().ComponentInstalledPath("grafana", utils.Version(options.Version)) if err != nil { return nil, err } dataDir := p.dataDir grafanaDir := filepath.Join(dataDir, "grafana") cmd := exec.Command("cp", "-Rfp", installPath, grafanaDir) err = cmd.Run() if err != nil { return nil, errors.AddStack(err) } dashboardDir := filepath.Join(grafanaDir, "dashboards") err = utils.MkdirAll(dashboardDir, 0755) if err != nil { return nil, errors.AddStack(err) } // mv {grafanaDir}/*.json {grafanaDir}/dashboards/ err = filepath.Walk(grafanaDir, func(path string, info os.FileInfo, err error) error { // skip scan sub directory if info.IsDir() && path != grafanaDir { return filepath.SkipDir } if strings.HasSuffix(info.Name(), ".json") { return os.Rename(path, filepath.Join(grafanaDir, "dashboards", info.Name())) } return nil }) if err != nil { return nil, err } err = replaceDatasource(dashboardDir, clusterName) if err != nil { return nil, err } grafana := newGrafana(options.Version, options.Host, options.GrafanaPort) // fmt.Println("Start Grafana instance...") err = grafana.start(ctx, grafanaDir, options.ShOpt.PortOffset, "http://"+utils.JoinHostPort(monitorInfo.IP, monitorInfo.Port)) if err != nil { return nil, err } p.instanceWaiter.Go(func() error { err := grafana.wait() if err != nil && atomic.LoadInt32(&p.curSig) == 0 { fmt.Printf("Grafana quit: %v\n", err) } else { fmt.Println("Grafana quit") } return err }) return grafana, nil } func logIfErr(err error) { if err != nil { fmt.Println(err) } } // Check the MySQL Client version // // Since v8.1.0 `--comments` is the default, so we don't need to specify it. 
// Without `--comments` the MySQL client strips TiDB specific comments // like `/*T![clustered_index] CLUSTERED */` // // This returns `mysql --comments` for older versions or in case we failed to check // the version for any reason as `mysql --comments` is the safe option. // For newer MySQL versions it returns just `mysql`. // // For MariaDB versions of the MySQL Client it is expected to return `mysql --comments`. func mysqlCommand() (cmd string) { cmd = "mysql --comments" mysqlVerOutput, err := exec.Command("mysql", "--version").Output() if err != nil { return } vMaj, vMin, _, err := parseMysqlVersion(string(mysqlVerOutput)) if err == nil { // MySQL Client 8.1.0 and newer if vMaj == 8 && vMin >= 1 { return "mysql" } // MySQL Client 9.x.x. Note that 10.x is likely to be MariaDB, so not using >= here. if vMaj == 9 { return "mysql" } } return } // parseMysqlVersion parses the output from `mysql --version` that is in `versionOutput` // and returns the major, minor and patch version. // // New format example: `mysql Ver 8.2.0 for Linux on x86_64 (MySQL Community Server - GPL)` // Old format example: `mysql Ver 14.14 Distrib 5.7.36, for linux-glibc2.12 (x86_64) using EditLine wrapper` // MariaDB 11.2 format: `/usr/bin/mysql from 11.2.2-MariaDB, client 15.2 for linux-systemd (x86_64) using readline 5.1` // // Note that MariaDB has `bin/mysql` (deprecated) and `bin/mariadb`. This is to parse the version from `bin/mysql`. // As TiDB is a MySQL compatible database we recommend `bin/mysql` from MySQL. // If we ever want to auto-detect other clients like `bin/mariadb`, `bin/mysqlsh`, `bin/mycli`, etc then // each of them needs their own version detection and adjust for the right commandline options. 
func parseMysqlVersion(versionOutput string) (vMaj int, vMin int, vPatch int, err error) { mysqlVerRegexp := regexp.MustCompile(`(Ver|Distrib|from) ([0-9]+)\.([0-9]+)\.([0-9]+)`) mysqlVerMatch := mysqlVerRegexp.FindStringSubmatch(versionOutput) if mysqlVerMatch == nil { return 0, 0, 0, errors.New("No match") } vMaj, err = strconv.Atoi(mysqlVerMatch[2]) if err != nil { return 0, 0, 0, err } vMin, err = strconv.Atoi(mysqlVerMatch[3]) if err != nil { return 0, 0, 0, err } vPatch, err = strconv.Atoi(mysqlVerMatch[4]) if err != nil { return 0, 0, 0, err } return } tiup-1.16.3/components/playground/playground_test.go000066400000000000000000000043001505422223000226760ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "os" "os/user" "path/filepath" "testing" "github.com/stretchr/testify/assert" ) func TestPlaygroundAbsDir(t *testing.T) { err := os.Chdir("/usr") assert.Nil(t, err) a, err := getAbsolutePath("./a") assert.Nil(t, err) assert.Equal(t, "/usr/a", a) b, err := getAbsolutePath("../b") assert.Nil(t, err) assert.Equal(t, "/b", b) u, err := user.Current() assert.Nil(t, err) c, err := getAbsolutePath("~/c/d/e") assert.Nil(t, err) assert.Equal(t, filepath.Join(u.HomeDir, "c/d/e"), c) } func TestParseMysqlCommand(t *testing.T) { cases := []struct { version string vMaj int vMin int vPatch int err bool }{ { "mysql Ver 8.2.0 for Linux on x86_64 (MySQL Community Server - GPL)", 8, 2, 0, false, }, { "mysql Ver 8.0.34 for Linux on x86_64 (MySQL Community Server - GPL)", 8, 0, 34, false, }, { "mysql Ver 8.0.34-foobar for Linux on x86_64 (MySQL Community Server - GPL)", 8, 0, 34, false, }, { "foobar", 0, 0, 0, true, }, { "mysql Ver 14.14 Distrib 5.7.36, for linux-glibc2.12 (x86_64) using EditLine wrapper", 5, 7, 36, false, }, { "mysql Ver 15.1 Distrib 10.3.37-MariaDB, for Linux (x86_64) using readline 5.1", 10, 3, 37, false, }, { "/bin/mysql from 11.2.2-MariaDB, client 15.2 for linux-systemd (x86_64) using readline 5.1", 11, 2, 2, false, }, } for _, tc := range cases { vMaj, vMin, vPatch, err := parseMysqlVersion(tc.version) if tc.err { assert.NotNil(t, err) } else { assert.Nil(t, err) } assert.Equal(t, vMaj, tc.vMaj) assert.Equal(t, vMin, tc.vMin) assert.Equal(t, vPatch, tc.vPatch) } } tiup-1.16.3/doc/000077500000000000000000000000001505422223000133235ustar00rootroot00000000000000tiup-1.16.3/doc/README.md000066400000000000000000000003051505422223000146000ustar00rootroot00000000000000# TiUp documentation * [User docs](user) documentation for TiUp users, * [Dev docs](dev) documentation for TiUp developers, * [Design docs](design) documentation of our *planned* design for TiUp. 
tiup-1.16.3/doc/design/000077500000000000000000000000001505422223000145745ustar00rootroot00000000000000tiup-1.16.3/doc/design/README.md000066400000000000000000000012441505422223000160540ustar00rootroot00000000000000# TiUp design docs This folder contains documentation on the current and future design of TiUp. For our current status on design issues, see this [project](https://github.com/pingcap/tiup/projects/3). Some important issues we should cover: * [Requirements and goals](requirements.md) * The [manifest format and server-side file layout](manifest.md) * Testing strategy ([project](https://github.com/pingcap/tiup/projects/2)) * Support for overriding components with local versions (e.g., built from source). * Local mirrors * Components (which components we hope to support and what their CLI will look like) * Component API (for supporting custom components) * Code design tiup-1.16.3/doc/design/checkpoint.md000066400000000000000000000227771505422223000172640ustar00rootroot00000000000000# The checkpoint implementation for tiup-cluster and tiup-dm When there is an occasional error on `tiup cluster` or `tiup dm` command, some users may want to retry previews action from the fail point instead of from scratch. For example, the following tasks: ``` 1. download package | 2. scp package to remote | 3. unzip remote package <- failed here | 4. start service ``` If something wrong with the third task, retry from the first task is OK because TiUP provides a guarantee that all commands are idempotent. However, for some large clusters, it may waste a lot of time on successful tasks (task 1 and stpe 2) and the user may want to restart the process from task 3. ## The audit log tiup-cluster and tiup-dm will generate an audit log file in `${TIUP_HOME}/storage/{cluster,dm}/audit/`, you can view the audit list with the command `tiup cluster audit` or `tiup dm audit`. 
The list looks like this: ``` ID Time Command -- ---- ------- fxgcScKJ2Kd 2021-01-21T18:36:10+08:00 /home/tidb/.tiup/components/cluster/v1.3.1/tiup-cluster display test fxgcRrMnBz8 2021-01-21T18:35:56+08:00 /home/tidb/.tiup/components/cluster/v1.3.1/tiup-cluster start test ``` The first column is the id of the audit, to view a specified audit log, use the command `tiup cluster audit ` or `tiup dm audit `, the content of the audit log is something like this: ``` /home/tidb/.tiup/components/cluster/v1.3.1/tiup-cluster display test 2021-01-21T18:36:08.380+0800 INFO Execute command {"command": "tiup cluster display test"} 2021-01-21T18:36:09.805+0800 INFO SSHCommand {"host": "172.16.5.140", "port": "22", "cmd": "xxx command", "stdout": "xxxx", "stderr": ""} ``` The first line of the file is the command the user executed, the following lines are structure logs. ## The checkpoint In the implementation of the checkpoint, we mix checkpoint in the audit log, like this: ``` /home/tidb/.tiup/components/cluster/v1.3.1/tiup-cluster display test 2021-01-21T18:36:08.380+0800 INFO Execute command {"command": "tiup cluster display test"} 2021-01-21T18:36:09.805+0800 INFO SSHCommand {"host": "172.16.5.140", "port": "22", "cmd": "echo task 1", "stdout": "task 1", "stderr": ""} 2021-01-21T18:36:09.806+0800 INFO CheckPoint {"host": "172.16.5.140", "n": 1, "result": true} 2021-01-21T18:36:09.805+0800 INFO SSHCommand {"host": "172.16.5.140", "port": "22", "cmd": "echo task 2", "stdout": "task 2", "stderr": ""} 2021-01-21T18:36:09.806+0800 INFO CheckPoint {"host": "172.16.5.140", "n": 2, "result": true} 2021-01-21T18:36:09.805+0800 INFO SSHCommand {"host": "172.16.5.140", "port": "22", "cmd": "echo task 3", "stdout": "task 2", "stderr": ""} 2021-01-21T18:36:09.806+0800 INFO CheckPoint {"host": "172.16.5.140", "n": 3, "result": true} ``` If the user runs tiup-cluster or tiup-dm in replay mode by giving an audit id, we will parse that audit log file and pick up all `CheckPoint` by order 
into a queue, then in corresponding functions, we check if the checkpoint is in the queue, if hit, we dequeue the checkpoint and return the result directly instead of doing the real work. Example: ```golang func init() { // Register checkpoint fields so that we know how to compare checkpoints countHost := checkpoint.Register( checkpoint.Field("host", reflect.DeepEqual), checkpoint.Field("n", func(a, b interface{}) bool { // the n is a int, however, it will be float after it write to json because json only has float number. // so we just compare the string format. return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", b) }), ) } func processCommand() { // we will explain the context in the next section ctx := checkpoint.NewContext(context.Background()) r, err := task(ctx, host1, 1) handleResult(r, err) r, err = task(ctx, host2, 2) handleResult(r, err) r, err = task(ctx, host3, 3) handleResult(r, err) } func task(ctx context.Context, host string, n int) (result bool, err error) { // first, we get the checkpoint from audit log point := checkpoint.Acquire(ctx, countHost, map[string]interface{}{ "host": host, "n": n, }) defer func() { // we must call point.Release, otherwise there will be a resource leak. // the release function will write the checkpoint into current audit log (not the one user specified) // for latter replay. 
point.Release(err, zap.String("host", host), zap.Int("n", n), zap.Bool("result", result), ) }() // Then, if the checkpoint exists in the specified audit file, point.Hit() will return map[string]interface{} if point.Hit() != nil { return point.Hit()["result"].(bool), nil } // Last, if the checkpoint does not exist in the specified audit file, we should do real work and return the result return do_real_work(host, n) } ``` ## The context If the function with checkpoint calls another function with checkpoint, we will get into trouble: ```golang func processCommand() { ctx := checkpoint.NewContext(context.Background()) r, err := task(ctx, host1, 1) handleResult(r, err) r, err := task(ctx, host1, 0) handleResult(r, err) } // we use a flag mock that the task only return true once var flag = true func task(ctx context.Context, host string, n int) (result bool, err error) { point := checkpoint.Acquire(ctx, countHost, map[string]interface{}{ ... } defer func() { point.Release( ... ) } if point.Hit() != nil { ... } if n > 1 { return task(ctx, host, n - 1) } defer func() { flag = false }() return flag, nil } ``` The execution flow and return value will be: ``` task(1)[called by processCommand]: return true | task(0)[called by task(1)]: return true | task(0)[called by processCommand]: return false ``` the checkpoint in audit log will be: ``` ... {"host": "...", "n": 1, "result": true} ... {"host": "...", "n": 0, "result": true} ... 
{"host": "...", "n": 0, "result": false} ``` There are three checkpoints, but when we try to replay the process, the `task(0)[called by task(1)]` will not be called at all since `task(1)` will return early with the cached result, so the execution flow will be: ``` task(1)[called by processCommand]: return true (cached by {"host": "...", "n": 1, "result": true}) | task(0)[called by processCommand]: return true (cached by {"host": "...", "n": 0, "result": true}) ``` The trouble is coming: in the real case the `task(0)[called by processCommand]` returns false but in replay case it return true because it takes the result of `task(0)[called by task(1)]` by mistake. The problem is that the `CheckPoint` of `task(0)[called by task(1)]` should not be record because it's parent, `task(0)[called by processCommand]`, has record a `CheckPoint`. So we implement a semaphore and insert it into the context passed to `checkpoint.Acquire`, the context or it's ancestor must be generated by `checkpoint.NewContext` where the semaphore is generated. When `checkpoint.Acquire` called, it will try to acquire the semaphore and record if it success in the returned value, when we call `Release` on the returned value, it will check if previews semaphore acquire success, if not, the `Release` will not writing checkpoint. ## Parallel task Because we use a semaphore in the context to trace if it's the first stack layer that wants to write checkpoint, the context can't be shared between goroutines: ```golang func processCommand() { ctx := checkpoint.NewContext(context.Background()) for _, n := range []int{1, 2, 3} { go func() { r, err := task(ctx, host, n) handleResult(r, err) }() } } ``` There are three tasks, `task(1)`, `task(2)` and `task(3)`, they run parallelly. What if the `task(0)` run first but return last? 
``` task(1): start -------------------------------------------> return task(2): start ------------------------> return task(3): start ------------------------> return ``` The checkpoint of `task(2)` and `task(3)` will not be recorded because they think they are called by `task(1)`. The solution is to add a semaphore for every goroutine: ```golang func processCommand() { ctx := checkpoint.NewContext(context.Background()) for _, n := range []int{1, 2, 3} { go func(ctx context.Context) { r, err := task(ctx, host, n) handleResult(r, err) }(checkpoint.NewContext(ctx)) } } ``` What if the `processCommand` or its' ancestor has its' own checkpoint? ```golang func processTask(ctx context.Context) { p := checkpoint.Acquire(...) defer func() { p.Release(...) }() if p.Hit() != nil { ... } return processCommand(ctx) } func processCommand(ctx context.Context) { for _, n := range []int{1, 2, 3} { go func(ctx context.Context) { r, err := task(ctx, host, n) handleResult(r, err) }(checkpoint.NewContext(ctx)) } } ``` If `checkpoint.NewContext` just append a unacquired semaphore, the checkpoint of `processTask` and it's children(`task(1..3)`) will be all recorded, that's not correct (we have talked this before). So the `checkpoint.NewContext` should check if there is already a semaphore in current context, if there is, just copy it's value. By this way, if `processTask` has acquired the semaphore, the `task(1..3)` will get their own acquired semaphore, otherwise, they will get their own unacquired semaphore. tiup-1.16.3/doc/design/manifest.md000066400000000000000000000705221505422223000167320ustar00rootroot00000000000000# Manifest format and repository layout See [#120](https://github.com/pingcap/tiup/issues/120). ## The Update Framework [The Update Framework (TUF)](https://theupdateframework.io/overview/) is the state of the art for update systems. It has a strong focus on security. 
TUF is a [specification](https://github.com/theupdateframework/specification/blob/master/tuf-spec.md#the-update-framework-specification) for building update systems, not an implementation (though reference implementations exist). It does not fully specify how such a system should work, e.g., it does not discuss how updates are applied or how servers are managed. However, it defines what metadata should be kept, how data and metadata should be signed, and how to ensure downloads are secure. We build on and adapt TUF, all places where we deviate from TUF are enumerated below. I believe we still fulfil all TUF security guarantees. ## Components A component is a program which can be run by TiUp. In this document, we do not consider what constitutes a component, for our purposes it is an opaque file. A component may have many versions. We do not discuss how versions are specified or applied, only that a client may specify a version to download. A component may be supplied by PingCAP or a third party, we detail how components are *uploaded*, but how components are packaged and checked by the client is out of scope. We assume that a component is not 'opened', i.e., a component is uploaded to a server and later downloaded, the server does not inspect the contents of the component. Note that this design document only addresses part of the security issue with handling third party code or binaries - we ensure that the code that a user downloads is the same code that the publisher uploads, however, we cannot ensure that the code uploaded is not malicious. We do not support channels or groupings of components. We expect components will support something here, but it will be up to the client to implement. TiUp itself can be treated as a component and I think does not need to be treated any differently on the server side. 
## Other terms * repository: a source of components and manifests (aka server, mirror) * manifest: a JSON metadata file * platform: the hardware platform and operating system where a binary will run (c.f., built). ## Principles * URLs are persistent. * All data is immutable * Most metadata is immutable, achieved using versioning. * Components cannot be removed or deleted. * Zero-trust access control. * Minimise the number of keys and the frequency with which they must be replaced. * Uploads and downloads should be secure. * All operations should be idempotent or at least repeatable. If an operation is interrupted, the user can just retry until it succeeds. * There should be no 'transaction' mechanism between client and server, groups of operations should not require atomicity. * Minimise required knowledge in the client - the metadata should direct the client how to perform operations with minimal knowledge of the server's data layout, etc. * We should not require consistency between files in the repo (beyond hashes and keys). ## Assumptions * Updates are per-component, updating all components is interpreted by the client. * There is a secure channel to distribute initial and updated keys for each server * Don't need to change the owner of a component. * Don't need to delegate ownership of a component (owners can delegate themselves with client-side tooling). ## Repository organisation We require snapshot.json and root.json to exist at the root of the repository. There will be many n.root.json files, these must also be at the repository root. The rest of the repository organisation can be changed freely since all other locations are specified by URLs. All files and manifests should be in the repository root, i.e., there is a single directory. 
Each non-manifest file has the form `id-version-platform.ext`, where `ext` is `tar.gz` for tarballs, `id` is the component id, `platform` is a target triple, and `version` is a semver version containing exactly two periods (`.`, e.g., `1.0.0`). ## Manifests Each manifest is a [canonical json](http://wiki.laptop.org/go/Canonical_JSON) file with a `.json` extension. Manifest format: ``` { "signatures": [ { "keyid": "", "sig": "", } ], "signed": { "_type": "TY", "spec_version": "", "expires": "TS", // version is only present for versioned manifests. "version": n, ROLE }, } ``` where * `""` is a string. * `n` is an integer > 0. * `"TS"` is a timestamp (date and time) in ISO 8601 format; timezone should always be `Z`. * `"TY"` is one of `"root"`, `"index"`, `"component"`, `"snapshot"`, or `"timestamp"`. * `ROLE` is specific to the type of manifest. * The value of `"spec_version"` should be valid semver. The timestamp and snapshot manifests should expire after one day. All other manifests should expire after one year. URLs are relative to the repository root, which allows a repository to be cloned to a new location without changing URLs. The spec version should be the same for all files in a given snapshot, if the client finds a spec version which is inconsistent, or that the client cannot interpret, it should abort its operation. ### Keys A key definition is: ```json "KEYID": { "keytype": "ed25519", "keyval": { "public": "KEY", }, "scheme": "ed25519", }, ``` where * `"KEYID"` is a globally unique identifier (i.e., keys must not share key ids); it is the hexdigest of the SHA-256 hash of the canonical JSON form of the key. * We support `"sha256"` and `"sha512"` hashing (only SHA256 in first iteration). * We support `"rsa"`, `"ed25519"`, and `"ecdsa-sha2-nistp256"` key types (only `"rsa"` in first iteration). 
* We support `"rsassa-pss-sha256"`, `"ed25519"`, and `"ecdsa-sha2-nistp256"` schemes (only `"rsassa-pss-sha256"` in first iteration) - "rsassa-pss-sha256" : RSA Probabilistic signature scheme with appendix. The underlying hash function is SHA256. https://tools.ietf.org/html/rfc3447#page-29 - "ed25519" : Elliptic curve digital signature algorithm based on Twisted Edwards curves. https://ed25519.cr.yp.to/ - "ecdsa-sha2-nistp256" : Elliptic Curve Digital Signature Algorithm with NIST P-256 curve signing and SHA-256 hashing. https://en.wikipedia.org/wiki/Elliptic_Curve_Digital_Signature_Algorithm Note that the non-rsa schemes only need implementing if needed. The private keys for the timestamp, snapshot, and index roles will be stored online (with the server software, not in the repository itself), they must not be accessible to the internet. Private keys for the root role should be stored offline; any action requiring re-signing of the root manifest is an admin action which must involve a user with access to the private keys. Every manifest except root.json has key threshold 1, i.e., each manifest can be signed by a single key (except during key migrations, see the [TUF spec](https://github.com/theupdateframework/specification/blob/master/tuf-spec.md#6-usage) for more info). root.json should have a threshold of at least 3. We should create 4 (or 5?) root keys which are stored offline and separately (to reduce the risk of an attack if the place where a key is stored is compromised). For each owner, we create a new key pair in the client, the public key is stored in index.json, the private key is stored by the client. Every component manifest can be signed by either the component's owner or by the index role (this is implicit, an alternative would be to add these keyids to the component metadata in index.json, which would be more flexible and make key revocation explicit). 
Rotating keys is specified by [TUF](https://github.com/theupdateframework/specification/blob/master/tuf-spec.md#6-usage). Neither TUF nor TiUp will dictate how often this should happen. The TiUp server software should provide a facility for doing so, both for the root keys and for owner keys. The TiUp client should automatically rotate owner keys on a regular basis (question: how often?). TiUp server should ensure that all keys are unique, this should be verified by the client whenever it downloads a new manifest. ### root.json and n.root.json ``` "_type": "root" ``` Indexes other top-level manifests and roles. n is the version, versioned files are immutable, root.json is a copy of the most recent version of n.root.json (or a symlink, if that works with the web server) and is mutable. Example: ``` "roles": { "root": { "url": "/root.json", "keys": { "65171251a9aff5a8b3143a813481cb07f6e0de4eb197c767837fe4491b739093": { "keytype": "ed25519", "keyval": { "public": "edcd0a32a07dce33f7c7873aaffbff36d20ea30787574ead335eefd337e4dacd", }, "scheme": "ed25519", }, }, "threshold": 1, }, "index": { "url": "/index.json", "keys": { "5e777de0d275f9d28588dd9a1606cc748e548f9e22b6795b7cb3f63f98035fcb": { "keytype": "rsa", "keyval": { "public": "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA0GjPoVrjS9eCqzoQ8VRe\nPkC0cI6ktiEgqPfHESFzyxyjC490Cuy19nuxPcJuZfN64MC48oOkR+W2mq4pM51i\nxmdG5xjvNOBRkJ5wUCc8fDCltMUTBlqt9y5eLsf/4/EoBU+zC4SW1iPU++mCsity\nfQQ7U6LOn3EYCyrkH51hZ/dvKC4o9TPYMVxNecJ3CL1q02Q145JlyjBTuM3Xdqsa\nndTHoXSRPmmzgB/1dL/c4QjMnCowrKW06mFLq9RAYGIaJWfM/0CbrOJpVDkATmEc\nMdpGJYDfW/sRQvRdlHNPo24ZW7vkQUCqdRxvnTWkK5U81y7RtjLt1yskbWXBIbOV\nz94GXsgyzANyCT9qRjHXDDz2mkLq+9I2iKtEqaEePcWRu3H6RLahpM/TxFzw684Y\nR47weXdDecPNxWyiWiyMGStRFP4Cg9trcwAGnEm1w8R2ggmWphznCd5dXGhPNjfA\na82yNFY8ubnOUVJOf0nXGg3Edw9iY3xyjJb2+nrsk5f3AgMBAAE=\n-----END PUBLIC KEY-----", }, "scheme": "rsassa-pss-sha256", }, }, "threshold": 1, }, "snapshot": {...}, "timestamp": {...}, }, ``` ### 
n.index.json ``` "_type": "index" ``` Lists owners and components. File is versioned and immutable, n is the version. Replaces the targets role in TUF. Example: ```json "owners": { "pingcap": { "name": "PingCAP", "threshold": 1, "keys": { "4e777de0d275f9d28588dd9a1606cc748e548f9e22b6795b7cb3f63f98035fcb": { "keytype": "rsa", "keyval": { "public": "-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEA0GjPoVrjS9eCqzoQ8VRe\nPkC0cI6ktiEgqPfHESFzyxyjC490Cuy19nuxPcJuZfN64MC48oOkR+W2mq4pM51i\nxmdG5xjvNOBRkJ5wUCc8fDCltMUTBlqt9y5eLsf/4/EoBU+zC4SW1iPU++mCsity\nfQQ7U6LOn3EYCyrkH51hZ/dvKC4o9TPYMVxNecJ3CL1q02Q145JlyjBTuM3Xdqsa\nndTHoXSRPmmzgB/1dL/c4QjMnCowrKW06mFLq9RAYGIaJWfM/0CbrOJpVDkATmEc\nMdpGJYDfW/sRQvRdlHNPo24ZW7vkQUCqdRxvnTWkK5U81y7RtjLt1yskbWXBIbOV\nz94GXsgyzANyCT9qRjHXDDz2mkLq+9I2iKtEqaEePcWRu3H6RLahpM/TxFzw684Y\nR47weXdDecPNxWyiWiyMGStRFP4Cg9trcwAGnEm1w8R2ggmWphznCd5dXGhPNjfA\na82yNFY8ubnOUVJOf0nXGg3Edw9iY3xyjJb2+nrsk5f3AgMBAAE=\n-----END PUBLIC KEY-----", }, "scheme": "rsassa-pss-sha256", }, "59a4df8af818e9ed7abe0764c0b47b4240952aa0d179b5b78346c470ac30278d": { "keytype": "ed25519", "keyval": { "public": "edcd0a32a07dce33f7c7873aaffbff36d20ea30787574ead335eefd337e4dacd", }, "scheme": "ed25519", }, }, }, }, "components": { "tidb": { "yanked": false, "owner": "pingcap", "url": "/name_of.json", }, }, "default components": [ "tidb", "tikv", "pd", ] ``` Each owner id and component id must be unique (TiUp should treat owner and component ids as distinct types, but ids must be unique within the union of the types). ### n.xxx.json ``` "_type": "component" ``` Where xxx is the id of the component, n is the version of the manifest, file is immutable. 
``` "description": "This is a component", "nightly": "v0.2.0-nightly+20200525", "platforms": { "x86_64-apple-darwin": { "v0.0.1": { "yanked": false, "url": "/name-of_x86_64-apple-darwin_v0.0.1.tar.gz", "hashes": { "sha256": "141f740f53781d1ca54b8a50af22cbf74e44c21a998fa2a8a05aaac2c002886b", "sha512": "ef5beafa16041bcdd2937140afebd485296cd54f7348ecd5a4d035c09759608de467a7ac0eb58753d0242df873c305e8bffad2454aa48f44480f15efae1cacd0" }, "length": 1001000499, "dependencies": { "foo": "v1.0.0", }, }, "v0.2.0": { ... }, }, "aarch64-unknown-linux": { ... }, }, ``` The platform id should be one of the supported TiUp target triples. Version ids must be valid semver. Dependencies are a map from component id to a semver version string. The "nightly" points to the version number of latest daily build, that version should be in the version list of all supported platforms. The version number of nightly build should (but not forced to) be in the following format: ``` vX.Y.Z-nightly-YYYY-mm-dd ``` Where `vX.Y.Z` is the version of the last released version of that component. ### snapshot.json ``` "_type": "snapshot" ``` Lists the latest version number for each manifest. Used for consistency, to check for updates, and security. Not versioned. ```json "meta": { "root.json": { "version": 1 "length": 500 }, "index.json": { "version": 1 "length": 500 }, "name_of.json": { "version": 1, "length": 500 }, "foo.json": { "version": 1, "length": 240 } }, ``` ### timestamp.json ``` "_type": "timestamp" ``` Short timeout, basically just a hash of snapshot.json. This file has a version, but we don't keep versioned files on the server, just one timestamp.json. This file is mutable. ```json "meta": { "snapshot.json": { "hashes": { "sha256": "8f88e2ba48b412c3843e9bb26e1b6f8fc9e98aceb0fbaa97ba37b4c98717d7ab" }, "length": 515, } }, ``` ## Deviations from TUF * Exactly one layer of delegation to owners; no terminating of delegation. * keys are inlined since we do not support sharing keys. 
* We use some aspects of consistent snapshots for security, availability, and convenience, but we do not guarantee consistent snapshots. * targets.json -> index.json; no delegations section, replaced with list of owners (no delegated targets manifests, these are effectively inlined into index). * Added component.json to deal with multiple versions and platforms of each component. * No mirrors.json (optional anyway). * index and snapshot role keys must be kept online since these manifests must be updated by the server when a new component or version is uploaded by a user. ## Workflows * All downloaded manifests are kept between operations. Once an updated version has been downloaded and verified, the old version can be deleted. * All syntax is just a strawman and is not proposed as part of the design for TiUp. * In these workflows, if verification of a component or manifest fails, the operation may be retried or an error given to the user. In most cases we can retry as verification error may have been caused by a network error or otherwise be transient. ### Install TiUp client * Assume there is some secure mechanism for shipping * TiUp ships with the URL of the PingCAP repository and a recent copy of n.root.json * Go through the update root flow (see below) * Download and verify timestamp.json * Use timestamp.json to download and verify snapshot.json * Use snapshot.json to download and verify index.json * Use index.json and snapshot.json to install default components using the install/update component flow ### Download a component version for a target #### one component e.g., `tiup update foo` * Download and verify timestamp.json, if the hash of snapshot.json is unchanged, then finish with nothing to update. - If timestamp.json cannot be verified, it is possible that the key has changed. 
Retry after updating root.json * Update root.json * Download and verify timestamp.json (finish if it has not changed) * Use timestamp.json to download and verify snapshot.json * Use snapshot.json to download and verify index.json * Check index.json for new default components, install them using the install/update component flow * If the component has been yanked, inform the user. * Use index.json to download and verify the component manifest (finish if the signature has not changed) * For each platform - for each major version the user has installed, find the most recent, non-yanked recent version. - if the most recent version is more recent than the installed version, download and verify it, then install the new component and delete the old component. - if there is a new major version, download the most recent version of it and tell the user Note that we do not specify what should happen to existing versions or what should be communicated to the user #### all components e.g., `tiup update --all` * Download and verify timestamp.json, if the hash of snapshot.json is unchanged, then finish with nothing to update. - If timestamp.json cannot be verified, it is possible that the key has changed. Retry after updating root.json * Update root.json * Download and verify timestamp.json (finish if it has not changed) * Use timestamp.json to download and verify snapshot.json * Use snapshot.json to download and verify index.json * Check index.json for new default components, install them using the install/update component flow * For each component the user has installed (excluding TiUp) - If the component has been yanked, inform the user and continue to next component. - Use index.json to download and verify the component manifest (continue to next component if the signature has not changed) - For each platform * for each major version the user has installed, find the most recent, non-yanked version. 
* if the most recent version is more recent than the installed version, download and verify it, then install the new component and delete the old component. * if there is a new major version, download the most recent version of it and tell the user #### Self update e.g., `tiup update --self` Follow steps for one component workflow. ### Add a new owner Probably an implicit action when a user publishes their first component. * Generate a new key pair in the client. - The private key is stored securely. - The private key should persist if TiUp is updated or reinstalled (assuming the user does not find and delete the data) - The private key may be exported and imported for transfer to a different TiUp. * Update root.json * Send new owner request from TiUp client to server including the client's public key but not private key; request must be sent over TLS to ensure that the public key is not tampered with. * Check that owner id and name are unique. * Update index.json (by adding new owner), snapshot.json, and timestamp.json. * Return success. ### Add a new component e.g., `tiup publish foo`. Not specifying the client-side work, assume we end up with a tar ball and a generated manifest containing a single version for at least one platform. * Update root.json * Download and verify timestamp.json * Use timestamp.json to download and verify snapshot.json * Use snapshot.json to download and verify index.json * Verify that the user's entry in index.json is as expected. * Client signs manifest and sends manifest and tar ball to the server * Server verifies the manifest using the clients public key from most recent index.json. * Verify that the component id is unique and that the platform is valid. * Create a component directory and platform sub-directory. * Store the component tar ball and manifest (with version number 1) in the new directories. * Return success to the client. 
* Client downloads timestamp.json, snapshot.json, index.json, and the new component manifest and verifies they are as expected. ### Add a new version of a component * Update root.json * Download and verify timestamp.json * Use timestamp.json to download and verify snapshot.json * Use snapshot.json to download and verify index.json * Verify that the user's entry and the component's entry in index.json are as expected. * Use snapshot.json to download and verify the component's manifest, check it is as expected. * Create a new manifest by adding the new version (and a new platform, if required). * Client signs manifest and sends manifest and tar ball to the server * Server verifies the manifest using the clients public key from most recent index.json. * Verify that the platform is valid. * Create a platform sub-directory, if necessary. * Store the component tar ball and manifest (with incremented version number) in the new directories. * Return success to the client. * Client downloads timestamp.json, snapshot.json, index.json, and the new component manifest and verifies they are as expected. ### Yank a version of a component * The version of the component is marked as yanked in the component manifest (for all platforms on which it exists). * snapshot.json and timestamp.json are updated ### Yank all versions of a component * The component is marked as yanked in index.json * Every version of the component is marked as yanked in the component manifest. * snapshot.json and timestamp.json are updated ### Add a repository to the client. Adding a repo is trivial - we just need a URL and trusted root manifest, then follow the above workflow for installing the TiUp client to initialise the repo on the client side. However, the root manifest must be securely distributed. I believe this can be done by requesting the most recent root.json from the server using TLS (question: is this good enough?). 
### Rotate a top-level key Whether a key is rotated due to it being compromised or due to regular rotation, the procedure is the same. A top-level key cannot be removed without being replaced. This is an entirely server-side action. * Produce a new key pair. * Store the new private key or give it to the administrator to store securely. * Update root.json to add the new public key and remove the old one. * If replacing a root key, the new root.json should be signed with both the old and the new keys. * If a non-root key is replaced, create a new version of the relevant manifest and sign with the new key (and any older, non-replaced keys). - Note that if the index role is having a key rotated, then any component manifests signed with that key must be re-signed, as well as index.json. * Any copies of the new private key are securely destroyed * snapshot.json and timestamp.json are updated ### Rotate an owner's key * The user initiates a key rotation with TiUp client. * The client generates a new key pair. * The client sends the new public key to the server, signed using the old private key. * The client downloads (via timestamp.json, etc.), verifies, re-signs, and sends back to the server a component manifest for each component the user owns. * The server replaces the owner's entry in index.json with the new public key. * The server replaces each component manifest with the new one. * snapshot.json and timestamp.json are updated. * The server returns success to the client. * The client downloads all its manifests via the new timestamp.json and verifies they are as expected. ### Owner key is compromised or lost This is an admin action. The identity of the owner must be established offline, this is an obvious channel for a phishing attack, so identifying the owner should be taken very seriously. * The owner's client generates a new key pair. * The client re-signs all component manifests. 
* The client sends the public key and all component manifests to the server using TLS. * The client's identity is verified offline. * index.json and all component manifests are updated; snapshot.json and timestamp.json are updated. * Client updates and verifies index.json and component manifests. ### Ban/suspend an owner This is an admin action performed on the server. * All of an owner's components are marked as yanked. * The owner's public key is removed from index.json (it should be stored somewhere off-server). * Update snapshot.json and timestamp.json We might want to add a notification mechanism so that any clients who use one of the owner's components can be notified to remove it if there is a security risk. This could be checked on update and when a yanked component is downloaded. ### Clone part or all of a repo * The server uses snapshot.json to copy the most recent version of root.json, index.json, and each component manifest and component. - If only part of the repository is required, components and their manifests can be filtered in this step. * Each manifest is set to version 1, existing versions are discarded. * All non-owner keys are removed from manifests. * The altered copies are moved to the new location. * New root keys are generated and saved into the manifests. * The new server creates snapshot and timestamp manifests. ## Underlying flows ### Update root The client must have some trusted version of root.json, this is either the last version that has been successfully downloaded or the version shipped with the client. For a new repository, a trusted root.json must have been sent somehow. * The client repeatedly downloads new versions of n.root.json (by incrementing n) until a download fails. * At each increment, the client verifies the signature of the new root.json using the keys in both the old and new root.json. At each increment, the version number and spec version number must be equal to or greater than the old ones. 
* Once we have the most recent root.json, the client can then verify this is indeed the most up to date version by comparing it to the unversioned root.json. * The client must check the expiration of the timestamp of this version. * If keys have changed from the starting root.json for any role, delete the corresponding manifests (so they will be downloaded fresh as needed). ### Download and verify manifest Whenever downloading a component or manifest, the first step is to download timestamp.json, if that has not changed since the previous access, then nothing else in the repository has changed either. The downloaded timestamp.json must not have expired, if it has then report an error to the user. For any download, if there is a known length of file, download only that many bytes. Files without a known length should have a default maximum size known by the client. Preconditions: the client has downloaded snapshot.json and timestamp.json. The client has trusted keys for all top-level roles stored locally. * Verify the signature of timestamp.json, - if it is signed with a new key, then follow the flow for updating the root manifest and then verify with the new timestamp key. * Verify the expiration timestamp of timestamp.json. * Verify the signature and expiration of the timestamp of snapshot.json. * Use snapshot.json to find the URL of the manifest to download and fetch it. - For a component manifest, also download index.json - Verify the signature and timestamp of index.json using the index role's keys in root.json - Find the component's owner's keys via index.json (a component may also be signed by the index role) * Verify the manifest's signature with its role's key. Verify its timestamp has not expired. Verify its version and spec version have increased or are equal to the old version numbers. ### Download and verify component Precondition: the client has downloaded and verified the component's manifest and index.json. 
* Find the required platform and version in the component. If it has been yanked or the component has been yanked in index.json, abort and/or warn the user. * Download the component from the URL in the component manifest up to its specified length. * Verify the downloaded file with the hash in the component manifest. ### Update manifest #### Versioned manifest * Read the existing version of the manifest to ensure it has the same version number as the manifest when we read it, if not, abort. * Increment the version number, call the new version number `m` and write the new manifest to m.manifest-name.json, this write operation should fail if there is a file with that name which already exists, in that case, abort. * Note that the old version of the manifest is kept. #### Unversioned manifest * Only the admin user should be able to modify an unversioned manifest, we should ensure in software that there is no concurrent modification. * The new manifest is written to a temporary file on disk * The old manifest is deleted * The temporary manifest is renamed to the name of the manifest tiup-1.16.3/doc/design/requirements.md000066400000000000000000000207551505422223000176520ustar00rootroot00000000000000# Requirements and goals Includes the overlapping projects of TiUp, TiUp Cluster, and TiUp Playground. 
Priorities: * P0 must be shipped in initial release (blockers) * P1 should be shipped in initial release but don't block * P2 should ship in an early release * P3 would be nice to have ## Vision and high-level goals * Help expand the TiDB ecosystem by making it easy to share, install, and deploy components (P2) * Manage and deploy TiDB and TiKV clusters (P0) * Open source project, with a community of users and contributors (P1) * Help TiDB and TiKV developers run and test their code in a production-like environment (P2) * Make TiDB an easy to use database (P1) * Be the standard way for TiDB users to manage their clusters (P2) * Make it easy to try out TiDB, on a single machine and a cluster (P2) ## Use cases * A new user with no experience with TiDB can try out TiDB with minimal friction (P2) * A developer of TiDB or TiKV can setup a local environment to test, benchmark, and debug their code (P2) * An oncall DBA or developer can quickly setup a cluster matching one where a bug was reported (P1) * A user reporting a bug can provide clear steps to reproduce based on a TiUp cluster configuration (P1) * A TiDB DBA can manage a production cluster, including TODO (P0,1,2,3) ## Requirements ### Manage components * Install and update components (P0) * Uninstall/remove (P1) * Provide information about installed components and how to use them to the user (P0) * How must components be written? Restrictions on language, API? Sandboxing? 
(P2) * Components can be run directly from TiUp, components may run other components if they are dependencies (see versioning) (P0) - E.g., running tidb can launch pd and tikv components - Initially, this does not need explicit support in TiUp #### Versioning * Components have an explicit version (P0) * Components should use strict semver versioning (P0) * Version and semver compatibility is trusted, not verified (P0) * Users can specify which versions of each component to install (P1) * Component versions can be 'yanked', this means they are not listed and if the user tries to install them, they will be prompted for confirmation (P2) * Component versions cannot be deleted from servers (P0) * Component versions can be deleted locally (without removing the whole component) (P1) * Update (P1) - patch revisions will be updated by default (i.e., the user must opt-in to keeping older versions) - major and minor revisions will be duplicated by default (i.e., old versions are kept) * Users can have multiple versions of components installed and can specify which version will be launched by default (P2) * Users can run any version of a component that they have installed (P2) * The user may specify overrides of a component (P3) - any component may be overridden - an override may apply to any version, a specific versions, or a range of versions - an override may use another version or another component (of the same or different version) or a local binary - overrides apply to ways of running a component - when the user runs a component explicitly, the user can cause the override to be ignored #### Dependencies (P3) * Components can run other components, these are dependent components * Dependent components may or may not be managed by TiUp, but managed is recommended - If a dependent component is managed by TiUp it is specified explicitly and declaratively in the component's manifest. 
- If a dependent component is not managed by TiUp, then it is not specified explicitly and the component must handle download, version management, platform handling, running, etc. Managed dependent components: * Components may depend on other components and specify versions of those dependencies with which they are compatible * By default, TiUp will not let components run incompatible components, this may be forced by the user * TiUP will select which version to run (whether run by the user or another component) as follows: - if the version is constrained by the user, that constraint has highest priority - if the version is constrained by a (transitive-)dependee component, that constraint has second priority - these constraints can be ignored if specified by the user - if an override is specified by the user that overrides any version which fits the above constraints, then it is run - otherwise, the most recent installed version of a component which satisfies the constraints is run - if the constraints cannot be satisfied by any installed version, TiUp will show an error * When updating a component, by default, dependent components will be updated if necessary ### Platforms (P2) * Components may support multiple platforms (i.e., combinations of processor and operating system) * TiUp will install and run the correct platform of a component depending on where the component is run (which is not necessarily where TiUp is run) * A single version of TiUP shall support targets on different platforms (P3) ### Third-party components (P2) * Users should be able to publish their own components to public and private servers * Users can install third-party components and use them like the standard components * TiUp should manage version compatibility between components * Users should be able to search for components * Users who have published a component should be able to hide it (but not delete it), or do so with specific versions * Only the user who published a component 
should be able to publish new versions of that component * Users should be able to publish multiple components and be able to list them ### Self-update (P1) * Update TiUp from remote and local servers * Uninstall TiUp ### Deployment and managing a deployed cluster TODO Better error messages when setting up and managing a cluster ### Sources Also called mirrors and servers * PingCAP will host a publicly available server (P0) * Any third party may host a public or private server (P2) - Users must opt-in to using a source - Sources are strictly ordered for a given user (and can be changed by the user) - Sources cannot be specified per-component - All sources are checked in turn for a component - If updating a component causes it to be installed from a different source, the user should be prompted * The user may create a local server, that server is created empty (P1) * Components can be cloned from any source to a local source, either a specific version, all versions, or a range of versions (P3) * TiUp can manage a server (possibly with a non-standard component); users should not have to manually edit any part of a server (P3) * Hosts of servers are responsible for any authentication, TiUp will not keep servers private but will provide API hooks for hosts to do so (P0) ### Supported products (P0) * TiDB * TiKV (with and without TiDB) * TiFlash * Binlog * Blackbox probe * Prometheus and Grafana TiUp should be extensible with user-provided components (P2). ### Non-functional requirements In priority order * Reliability - could bugs in cluster management cause severe issues? That might be important. I think delaying update is not critical as long as it is not for too long, perhaps <1hr? * Security - we must be able to securely download files and manage clusters since the DB is likely to be security-critical for users. Download binaries is likely to be over the internet. Cluster management might include cross-data center communication on a public network. 
No need to protect data at rest. * Ease of contribution and development velocity - should be prioritised to help OSS community building. * Performance - performance is not critical, but the CLI should be responsive (low latency). Since most tasks are expected to be long-running, there is not much pressure for high performance. * Memory/disk usage - unlikely to be a problem. We should provide mechanisms for automatic and manual recovery of disk space, since in the long-term it might become an issue. * Scale - should support our largest users, i.e., 1000s of nodes in a cluster and nodes in different data centers. * Backwards compatibility - TODO how much do we have to preserve workflows and scripts between versions? ### Non-goals Things we intend not to do, either for now or ever. * Designed to be primarily used by humans, secondarily by scripts. Not intended as an API. * No GUI. * Project is specific to TiDB and TiKV, not a generic database or software management tool (but if components that do such things work, then that is ok). * No intent to support custom commands (c.f., components). * We should avoid providing any build system functionality tiup-1.16.3/doc/dev/000077500000000000000000000000001505422223000141015ustar00rootroot00000000000000tiup-1.16.3/doc/dev/README.md000066400000000000000000000063621505422223000153670ustar00rootroot00000000000000# Developer documentation ## Building You can build TiUP on any platform that supports Go. Prerequisites: * Go (minimum version: 1.21; [installation instructions](https://golang.org/doc/install)) * golint (`go get -u golang.org/x/lint/golint`) * make To build TiUP, run `make`. ## Running locally for development For development, you don't want to use any global directories. You may also want to supply your own metadata. TiUP can be modified using the following environment variables: * `TIUP_VERBOSE` this enables verbose logging when set to `enabled`. * `TIUP_HOME` the profile directory, where TiUP stores its metadata. 
If not set, `~/.tiup` will be used. * `TIUP_MIRRORS` set the location of TiUP's registry, can be a directory or URL. If not set, `https://tiup-mirrors.pingcap.com` will be used. > **Note** > TiUP need a certificate file (root.json) installed in `${TIUP_HOME}/bin` directory. If this is your first time getting TiUP, you can run `curl https://tiup-mirrors.pingcap.com/root.json -o ${TIUP_HOME}/bin/root.json` to get it installed. ## Testing TiUP has unit and integration tests; you can run unit tests by running `make test`. Unit tests are alongside the code they test, following the Go convention of using a `_test` suffix for test files. Integration tests are in the [tests](../../tests) directory. ## Architecture overview Each TiUP command has its own executable, their source is in the [cmd](../../cmd) directory. The main TiUP executable is [root.go](../../cmd/root.go). The core of TiUP is defined in the [pkg](../../pkg) directory. [localdata](../../pkg/localdata) manages TiUP's metadata held on the user's computer. [meta](../../pkg/meta) contains high-level functions for managing components. [repository](../../pkg/repository) handles remote repositories. The [set](../../pkg/set), [tui](../../pkg/tui), and [utils](../../pkg/utils) packages contain utility types and functions. The [version](../../pkg/version) package contains version data for TiUP and utility functions for handling that data. The [mock](../../pkg/utils/mock) package is a utility for testing. [embed](../../embed) contains static files used by builtin components (mainly `cluster` as of now), the template files are in [embed/templates](../../embed/templates) directory. Some key concepts: * *Repository* a source of components and metadata concerning those components and TiUP in general. * *Profile* the state of an installation of TiUP and the components it manages. * *Component* a piece of software that can be managed by TiUP, e.g., TiDB or the playground. 
* *Command* a top-level command run by TiUP, e.g., `update`, `list`. ### TiUP registry structure * tiup-manifest.index: the manifest file in json format. * Manifests for each component named tiup-component-$name.index, where %name is the name of the component. * Component tarballs, one per component, per version; named $name-$version-$os-$arch.tar.gz, where $name and $version identify the component, and $os and $arch are a supported platform. Each tarball has a sha1 hash with the same name, but extension .sha1, instead of .tar.gz. ### Manifest formats See `ComponentManifest` and `VersionManifest` data structures in [component.go](../../pkg/repository/types.go) and [version.go](../../pkg/version/version.go). tiup-1.16.3/doc/dev/release-note-guide.md000066400000000000000000000031171505422223000201030ustar00rootroot00000000000000# Release Notes Language Style Guide When you write a release note for your pull request, make sure that your language style meets the following rules: 1. Include `ACTION REQUIRED:` at the beginning if the change requires user action, e.g. deprecating or abandoning features: - ACTION REQUIRED: Add the `timezone` support for [all charts] Then, add label `release-note-action-required` onto the PR. This is required by [the tool we use to generate change log](generate-changelog.md). 2. Every note starts with the "do" form of a verb. For example: - Support backup to S3 with [Backup & Restore (BR)](https://github.com/pingcap/br) - Fix Docker ulimit configuring for the latest EKS AMI 3. Ensure no period at the end of note. 4. Use a single backquote (``) to frame the following elements in your release notes: - Custom Resource name, e.g. `TidbCluster`, `Backup` - Kubernetes Resource name, e.g. `Pod`, `StatefulSet` - Configuration item name, e.g. `.spec.version` - Variable name - Variable value - Error message - Field name 5. 
Pay attention to the capitalization of the following terms that are often misspelled: - PD, TiKV, TiDB (not pd, tikv, tidb) - TiDB Operator (not tidb operator) - TiDB Binlog (not tidb binlog) 6. The following templates are commonly used in release notes: - Fix the issue that ... when doing (an operation)/ when (... occurs) - Fix the issue that ... because ... (the cause of the problem) - Add the feature of (something/doing something) to do (the purpose) - Support (something/doing something) tiup-1.16.3/doc/rfcs/000077500000000000000000000000001505422223000142605ustar00rootroot00000000000000tiup-1.16.3/doc/rfcs/0001-separate-component-version-in-cluster.md000066400000000000000000000040731505422223000245360ustar00rootroot00000000000000# Separate Component Version in Cluster ## Summary Add a version field to each component on topology file. Allow user to upgrade single component to given version. ## Motivation - New component TiDB Dashboard need to deployed with TiDB but not release with TiDB. Need a way to specify Dashboard version when deployment and upgrade. - User want to upgrade node_exporter without upgrade TiDB cluster - Maybe it could replace tiup cluster patch function to provide patch version of single component ## Detailed design 1. ~~add "latest" alias to tiup download function.It cloud be used to download latest release package of component.~~ just use "" as latest alias 2. 
add ComponentVersions struct to topology ``` // ComponentVersions represents the versions of components ComponentVersions struct { TiDB string `yaml:"tidb,omitempty"` TiKV string `yaml:"tikv,omitempty"` TiFlash string `yaml:"tiflash,omitempty"` PD string `yaml:"pd,omitempty"` Dashboard string `yaml:"tidb_dashboard,omitempty"` Pump string `yaml:"pump,omitempty"` Drainer string `yaml:"drainer,omitempty"` CDC string `yaml:"cdc,omitempty"` TiKVCDC string `yaml:"kvcdc,omitempty"` TiProxy string `yaml:"tiproxy,omitempty"` Prometheus string `yaml:"prometheus,omitempty"` Grafana string `yaml:"grafana,omitempty"` AlertManager string `yaml:"alertmanager,omitempty"` } ``` 3. add node_exporter and blackbox_exporter version to MonitoredOptions struct ``` MonitoredOptions struct { ... NodeExporterVersion string `yaml:"node_exporter_version,omitempty" default:"latest"` BlackboxExporterVersion string `yaml:"blackbox_exporter_version,omitempty" default:"latest"` ... } ``` 4. Add CalculateVersion for each component struct. It returns cluster version if component version is not set for components like pd, tikv. It returns "" by default for components like alertmanager 5. Add flags to specify component versions 6. Merge ComponentVersion struct when scale-out 7. apply those version to deploy,scale-out and upgrade command tiup-1.16.3/doc/rfcs/0002-support-run-component-without-tiup.md000066400000000000000000000021401505422223000241540ustar00rootroot00000000000000# Support Run Component without TiUP ## Summary Allow users to only use TiUP during installation and upgrade, without needing to invoke TiUP while using the component. ## Motivation - Some users do not like the mandatory update checks and additional outputs by TiUP before invoking the components. - For programs that are not only distributed through TiUP, the existing usage patterns have led to documentation fragmentation. ## Detailed design 1. Add `tiup link [:version]` command to add soft link to $TIUP_HOME/bin 2. 
Add `tiup unlink [:version]` command to delete soft link from $HOME/bin 3. Add `--link` flag to `tiup install` and `tiup update` command to link while install/update 4. Mark these command and flag as experimental feature and we keep the old behavior as the default usage method. Warn: Users may need to manually set environment variables for certain components, such as ctl. 5. There is an additional benefit that user could use `tiup update tiup v1.13.0` to update tiup itself to specified version.And it makes TiUP unnecessary to handle special upgrades for itself.tiup-1.16.3/doc/user/000077500000000000000000000000001505422223000143015ustar00rootroot00000000000000tiup-1.16.3/doc/user/README.md000066400000000000000000000152211505422223000155610ustar00rootroot00000000000000# TiUP `tiup` is a tool to download and install TiDB components. For detailed documentation, see the manual which starts with an [overview](overview.md). To get started with TiDB using TiUP, see the [TiDB quick start guide](https://docs.pingcap.com/tidb/stable/quick-start-with-tidb). To deploy TiDB using TiUP in a production-like environment, see the [TiDB deployment guide](https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup). ## Installation ```sh curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh ``` ## Quick start ### Run playground ```sh tiup playground ``` ### Install components ```sh tiup install tidb tikv pd ``` ### Uninstall components ```sh tiup uninstall tidb tikv pd ``` ### Update components ```sh tiup update --all ``` # Usage After installing `tiup`, you can use it to install binaries of TiDB components and create clusters. ``` TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system. You can run a specific version of a component via "tiup [:version]". If no version number is specified, the latest version installed locally will be used. 
If the specified component does not have any version installed locally, the latest stable version will be downloaded from the repository. Usage: tiup [flags] [args...] tiup [flags] [args...] Available Commands: install Install a specific version of a component list List the available TiDB components or versions uninstall Uninstall components or versions of a component update Update tiup components to the latest version status List the status of instantiated components clean Clean the data of instantiated components mirror Manage a repository mirror for TiUP components telemetry Controls things about telemetry completion Output shell completion code for the specified shell (bash or zsh) env Show the list of system environment variable that related to TiUP help Help about any command or component Components Manifest: use "tiup list" to fetch the latest components manifest Flags: -B, --binary [:version] Print binary path of a specific version of a component [:version] and the latest version installed will be selected if no version specified --binpath string Specify the binary path of component instance --help Help for this command --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string -T, --tag string Specify a tag for component instance -v, --version Print the version of tiup Component instances with the same "tag" will share a data directory ($TIUP_HOME/data/$tag): $ tiup --tag mycluster playground Examples: $ tiup playground # Quick start $ tiup playground nightly # Start a playground with the latest nightly version $ tiup install [:version] # Install a component of specific version $ tiup update --all # Update all installed components to the latest version $ tiup update --nightly # Update all installed components to the nightly version $ tiup update --self # Update the "tiup" to the latest version $ tiup list # Fetch the latest supported components list $ tiup status # Display all running/terminated instances $ tiup 
clean # Clean the data of running/terminated instance (Kill process if it's running) $ tiup clean --all # Clean the data of all running/terminated instances Use "tiup [command] --help" for more information about a command. ``` The official components available are ``` Name Owner Description ---- ----- ----------- alertmanager pingcap Prometheus alertmanager bench pingcap Benchmark database with different workloads blackbox_exporter pingcap Blackbox prober exporter br pingcap TiDB/TiKV cluster backup restore tool cdc pingcap CDC is a change data capture tool for TiDB client pingcap Client to connect playground cluster pingcap Deploy a TiDB cluster for production ctl pingcap TiDB controller suite diag pingcap Diagnostic Collector dm pingcap Data Migration Platform manager dm-master pingcap dm-master component of Data Migration Platform dm-worker pingcap dm-worker component of Data Migration Platform dmctl pingcap dmctl component of Data Migration Platform drainer pingcap The drainer component of TiDB binlog service dumpling pingcap Dumpling is a CLI tool that helps you dump MySQL/TiDB data errdoc pingcap Document about TiDB errors grafana pingcap Grafana is the open source analytics & monitoring solution for every database insight pingcap TiDB-Insight collector node_exporter pingcap Exporter for machine metrics package pingcap A toolbox to package tiup component pd pingcap PD is the abbreviation for Placement Driver. 
It is used to manage and schedule the TiKV cluster pd-recover pingcap PD Recover is a disaster recovery tool of PD, used to recover the PD cluster which cannot start or provide services normally playground pingcap Bootstrap a local TiDB cluster for fun prometheus pingcap The Prometheus monitoring system and time series database pump pingcap The pump component of TiDB binlog service pushgateway pingcap Push acceptor for ephemeral and batch jobs server pingcap TiUP publish/cache server spark pingcap Spark is a fast and general cluster computing system for Big Data tidb pingcap TiDB is an open source distributed HTAP database compatible with the MySQL protocol tidb-lightning pingcap TiDB Lightning is a tool used for fast full import of large amounts of data into a TiDB cluster tiflash pingcap The TiFlash Columnar Storage Engine tikv pingcap Distributed transactional key-value database, originally created to complement TiDB tikv-importer pingcap tispark pingcap tispark tiup pingcap TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system ``` tiup-1.16.3/doc/user/bench.md000066400000000000000000000102711505422223000157030ustar00rootroot00000000000000# Benchmarking tiup bench has been **moved** to https://github.com/PingCAP-QE/tiup-bench To facilitate this, TiUP has integrated the bench component, which currently provides two workloads for pressure testing: tpcc and tpch, with the following command parameters: ```bash [user@localhost ~]# tiup bench Starting component `bench`: /Users/joshua/.tiup/components/bench/v0.0.1/bench Benchmark database with different workloads Usage: tiup bench [command] Available Commands: help Help about any command tpcc TPC-C workload tpch TPC-H workload Flags: --count int Total execution count, 0 means infinite -D, --db string Database name (default "test") -d, --driver string Database driver: mysql --dropdata Cleanup data before prepare -h, --help help for 
/Users/joshua/.tiup/components/bench/v0.0.1/bench -H, --host string Database host (default "127.0.0.1") --ignore-error Ignore error when running workload --interval duration Output interval time (default 10s) --isolation int Isolation Level 0: Default, 1: ReadUncommitted, 2: ReadCommitted, 3: WriteCommitted, 4: RepeatableRead, 5: Snapshot, 6: Serializable, 7: Linerizable --max-procs int runtime.GOMAXPROCS -p, --password string Database password -P, --port int Database port (default 4000) --pprof string Address of pprof endpoint --silence Don't print error when running workload --summary Print summary TPM only, or also print current TPM when running workload -T, --threads int Thread concurrency (default 16) --time duration Total execution time (default 2562047h47m16.854775807s) -U, --user string Database user (default "root") ``` ## TPC-C The commands and parameters of TPC-C are as follows: ```bash Available Commands: check check for data consistency cleanup clear data prepare prepare data run run benchmark Flags: --check-all run all consistency checks -h, --help tpcc help information --output string directory where csv file is generated when data is prepared --parts int number of partition repositories (default 1) --tables string specifies the tables used to generate the file, multiple tables can be split, only valid if output is set. All tables are generated by default --warehouses int number of repositories (default 10) ``` ### Example 1. Create 4 repositories using 4 partitions with hash ```shell tiup bench tpcc --warehouses 4 --parts 4 prepare ``` 2. Running TPC-C benchmark ```shell tiup bench tpcc --warehouses 4 run ``` 3. Cleanup data ```shell tiup bench tpcc --warehouses 4 cleanup ``` 4. Checking consistency ```shell tiup bench tpcc --warehouses 4 check ``` 5. Generating csv files ```shell tiup bench tpcc --warehouses 4 prepare --output data ``` 6. 
Generate csv files for specified tables ```shell tiup bench tpcc --warehouses 4 prepare --output data --tables history,orders ``` 7. Turn on pprof ```shell tiup bench tpcc --warehouses 4 prepare --output data --pprof :10111 ``` ## TPC-H The commands and parameters of TPC-C are as follows: ```bash Available Commands: cleanup cleanup data prepare prepare data run run benchmark Flags: --check checks the output data, only valid if the scale factor is 1 -h, --help tpch help information --queries string All query statements (default "q1,q2,q3,q4,q5,q6,q7,q8,q9,q10,q11,q12,q13,q14,q15,q16,q17,q18,q19,q20,q21,q22") --sf int scale factor ``` ## Example 1. Data preparation ```shell # Prepare data with scale factor 1 tiup bench tpch --sf=1 prepare # Or prepare data with scale factor 1, create tiflash replica, and analyze table after data loaded tiup bench tpch --sf=1 --analyze --tiflash prepare ``` 2. Benchmarking and checking results ```shell tiup bench tpch --sf=1 --check=true run ``` 3. Benchmarking without checking results ```shell tiup bench tpch --sf=1 run ``` 4. Cleanup ```shell tiup bench tpch cleanup ``` tiup-1.16.3/doc/user/cluster.md000066400000000000000000000417121505422223000163110ustar00rootroot00000000000000# Online cluster deployment and maintenance The cluster component deploys production clusters as quickly as playground deploys local clusters, and it provides more powerful cluster management capabilities than playground, including upgrades to the cluster, downsizing, scaling and even operational auditing. It supports a very large number of commands: ```bash $ tiup cluster The component `cluster` is not installed; downloading from repository. 
download https://tiup-mirrors.pingcap.com/cluster-v0.4.9-darwin-amd64.tar.gz 15.32 MiB / 15.34 MiB 99.90% 10.04 MiB p/s Starting component `cluster`: /Users/joshua/.tiup/components/cluster/v0.4.9/cluster Deploy a TiDB cluster for production Usage: tiup cluster [flags] tiup [command] Available Commands: deploy Deployment Cluster start Start deployed cluster stop Stop Cluster restart restart cluster scale-in cluster shrinkage Scale-out Cluster Scaling destroy Destroy cluster upgrade Upgrade Cluster exec executes commands on one or more machines in the cluster display Get cluster information list Get cluster list audit View cluster operation log import Import a cluster deployed by TiDB-Ansible edit-config Editing the configuration of TiDB clusters reload for overriding cluster configurations when necessary patch replaces deployed components on its cluster with temporary component packages help Print Help Information Flags: -h, -help Help Information --ssh-timeout int SSH connection timeout -y, --yes Skip all confirmation steps. ``` ## Deployment cluster The command used for deploying clusters is tiup cluster deploy, and its general usage is. ```bash tiup cluster deploy [flags] ``` This command requires us to provide the name of the cluster, the version of TiDB used by the cluster, and a topology file for the cluster, which can be written with reference to [example](/examples/topology.example.yaml). Take a simplest topology as an example: ```yaml --- pd_servers: - host: 172.16.5.134 name: pd-134 - host: 172.16.5.139 name: pd-139 - host: 172.16.5.140 name: pd-140 tidb_servers: - host: 172.16.5.134 - host: 172.16.5.139 - host: 172.16.5.140 tikv_servers: - host: 172.16.5.134 - host: 172.16.5.139 - host: 172.16.5.140 grafana_servers: - host: 172.16.5.134 monitoring_servers: - host: 172.16.5.134 ``` Save the file as `/tmp/topology.yaml`. 
If we want to use TiDB's v4.0.0-rc version with the cluster name prod-cluster, run: ```shell tiup cluster deploy prod-cluster v3.0.12 /tmp/topology.yaml ``` During execution, the topology is reconfirmed and prompted for the root password on the target machine. ```bash Please confirm your topology: TiDB Cluster: prod-cluster TiDB Version: v3.0.12 Type Host Ports Directories ---- ---- ----- ----------- pd 172.16.5.134 2379/2380 deploy/pd-2379,data/pd-2379 pd 172.16.5.139 2379/2380 deploy/pd-2379,data/pd-2379 pd 172.16.5.140 2379/2380 deploy/pd-2379,data/pd-2379 tikv 172.16.5.134 20160/20180 deploy/tikv-20160,data/tikv-20160 tikv 172.16.5.139 20160/20180 deploy/tikv-20160,data/tikv-20160 tikv 172.16.5.140 20160/20180 deploy/tikv-20160,data/tikv-20160 tidb 172.16.5.134 4000/10080 deploy/tidb-4000 tidb 172.16.5.139 4000/10080 deploy/tidb-4000 tidb 172.16.5.140 4000/10080 deploy/tidb-4000 prometheus 172.16.5.134 9090 deploy/prometheus-9090,data/prometheus-9090 grafana 172.16.5.134 3000 deploy/grafana-3000 Attention: 1. If the topology is not what you expected, check your yaml file. 1. Please confirm there is no port/directory conflicts in same host. Do you want to continue? [y/N]: ``` After entering the password, the tiup-cluster will download the required components and deploy them to the corresponding machine, indicating a successful deployment when you see the following prompt: ```bash Deployed cluster `prod-cluster` successfully ``` ## View cluster list Once the cluster is deployed we will be able to see it in the cluster list via the tiup cluster list: ```bash [user@localhost ~]# tiup cluster list Starting /root/.tiup/components/cluster/v0.4.5/cluster list Name User Version Path PrivateKey ---- ---- ------- ---- ---------- prod-cluster tidb v3.0.12 /root/.tiup/storage/cluster/clusters/prod-cluster /root/.tiup/storage/cluster/clusters/prod-cluster/ssh/id_rsa ``` ## Start the cluster. 
If you have forgotten the name of the cluster you have deployed, you can use the tiup cluster list to see the command to start the cluster: ```shell tiup cluster start prod-cluster ``` ## Checking cluster status We often want to know the operating status of each component in a cluster, and it's obviously inefficient to look at it from machine to machine, so it's time for the tiup cluster display, which is used as follows: ```bash [user@localhost ~]# tiup cluster display prod-cluster Starting /root/.tiup/components/cluster/v0.4.5/cluster display prod-cluster TiDB Cluster: prod-cluster TiDB Version: v3.0.12 ID Role Host Ports Status Data Dir Deploy Dir -- ---- ---- ----- ------ -------- ---------- 172.16.5.134:3000 grafana 172.16.5.134 3000 Up - deploy/grafana-3000 172.16.5.134:2379 pd 172.16.5.134 2379/2380 Healthy|L data/pd-2379 deploy/pd-2379 172.16.5.139:2379 pd 172.16.5.139 2379/2380 Healthy data/pd-2379 deploy/pd-2379 172.16.5.140:2379 pd 172.16.5.140 2379/2380 Healthy data/pd-2379 deploy/pd-2379 172.16.5.134:9090 prometheus 172.16.5.134 9090 Up data/prometheus-9090 deploy/prometheus-9090 172.16.5.134:4000 tidb 172.16.5.134 4000/10080 Up - deploy/tidb-4000 172.16.5.139:4000 tidb 172.16.5.139 4000/10080 Up - deploy/tidb-4000 172.16.5.140:4000 tidb 172.16.5.140 4000/10080 Up - deploy/tidb-4000 172.16.5.134:20160 tikv 172.16.5.134 20160/20180 Up data/tikv-20160 deploy/tikv-20160 172.16.5.139:20160 tikv 172.16.5.139 20160/20180 Up data/tikv-20160 deploy/tikv-20160 172.16.5.140:20160 tikv 172.16.5.140 20160/20180 Up data/tikv-20160 deploy/tikv-20160 ``` For normal components, the Status column will show "Up" or "Down" to indicate whether the service is normal or not, and for PD, the Status column will show Healthy or Down, and may have a |L to indicate that the PD is Leader. 
## Condensation Sometimes the business volume decreases and the cluster takes up some of the original resources, so we want to safely release some nodes and reduce the cluster size, so we need to downsize. The reduction is offline service, which eventually removes the specified node from the cluster and deletes the associated data files left behind. Since the downlinking of TiKV and Binlog components is asynchronous (requires removal through the API) and the downlinking process is time-consuming (requires constant observation to see if the node has been downlinked successfully), special treatment has been given to TiKV and Binglog components: - Operation of TiKV and Binlog components - TiUP cluster exits directly after it is offline via API without waiting for the offline to complete - When you wait until later, you will check for the presence of TiKV or Binlog nodes that have already been downlinked when you execute commands related to cluster operations. If it does not exist, the specified operation continues; if it does, the following operation is performed. - Stopping the service of nodes that have been downlinked - Clean up the data files associated with nodes that have been taken offline - Update the topology of the cluster and remove nodes that have been dropped - Operation of other components - The downlink of the PD component removes the specified node from the cluster via the API (a quick process), then disables the service of the specified PD and clears the data file associated with that node - Directly stop and clear the data files associated with the node when other components are downlinked Basic usage of the condensation command: ```bash tiup cluster-scale-in -N ```` It needs to specify at least two parameters, one is the cluster name and the other is the node ID, which can be obtained using the tiup cluster display command with reference to the previous section. 
For example, I want to kill the TiKV on 172.16.5.140, so I can execute: ```bash [user@localhost ~]# tiup cluster display prod-cluster Starting /root/.tiup/components/cluster/v0.4.5/cluster display prod-cluster TiDB Cluster: prod-cluster TiDB Version: v3.0.12 ID Role Host Ports Status Data Dir Deploy Dir -- ---- ---- ----- ------ -------- ---------- 172.16.5.134:3000 grafana 172.16.5.134 3000 Up - deploy/grafana-3000 172.16.5.134:2379 pd 172.16.5.134 2379/2380 Healthy|L data/pd-2379 deploy/pd-2379 172.16.5.139:2379 pd 172.16.5.139 2379/2380 Healthy data/pd-2379 deploy/pd-2379 172.16.5.140:2379 pd 172.16.5.140 2379/2380 Healthy data/pd-2379 deploy/pd-2379 172.16.5.134:9090 prometheus 172.16.5.134 9090 Up data/prometheus-9090 deploy/prometheus-9090 172.16.5.134:4000 tidb 172.16.5.134 4000/10080 Up - deploy/tidb-4000 172.16.5.139:4000 tidb 172.16.5.139 4000/10080 Up - deploy/tidb-4000 172.16.5.140:4000 tidb 172.16.5.140 4000/10080 Up - deploy/tidb-4000 172.16.5.134:20160 tikv 172.16.5.134 20160/20180 Up data/tikv-20160 deploy/tikv-20160 172.16.5.139:20160 tikv 172.16.5.139 20160/20180 Up data/tikv-20160 deploy/tikv-20160 172.16.5.140:20160 tikv 172.16.5.140 20160/20180 Offline data/tikv-20160 deploy/tikv-20160 ``` The node is automatically deleted after the PD schedules its data to other TiKVs. ## Expansion. The internal logic of scaling is similar to deployment in that the TiUP cluster first guarantees the SSH connection of the node, creates the necessary directory on the target node, then executes the deployment and starts the service. The PD node's expansion is added to the cluster by join, and the configuration of the services associated with the PD is updated; other services are added directly to the cluster. All services do correctness validation at the time of expansion and eventually return whether the expansion was successful. For example, expanding a TiKV node and a PD node in a cluster tidb-test: ### 1. 
New scale.yaml file, add TiKV and PD node IP > **Note** > > Note that a new topology file is created that writes only the description of the expanded node, not the existing node. ```yaml --- pd_servers: - ip: 172.16.5.140 tikv_servers: - ip: 172.16.5.140 ```` ### 2. Perform capacity expansion operations TiUP cluster add the corresponding node to the cluster according to the information such as port, directory, etc. declared in the scale.yaml file: ```shell tiup cluster scale-out tidb-test scale.yaml ```` After execution, you can check the expanded cluster status with the `tiup cluster display tidb-test` command. ## Rolling upgrade The rolling upgrade feature leverages TiDB's distributed capabilities to keep the upgrade process as transparent and non-aware of the front-end business as possible. If there is a problem with the configuration, the tool will be upgraded node by node. Which has different operations for different nodes. ### The operation of different nodes - Upgrade PD - Prioritize upgrading non-Leader nodes - Upgrade all non-Leader nodes after the upgrade is complete. - The tool sends a command to the PD to migrate the Leader to the node where the upgrade is complete - When Leader has been switched to another node, upgrade the old Leader node. - At the same time, if there is an unhealthy node in the upgrade process, the tool will suspend the upgrade and exit, at this time, the manual judgment, repair and then perform the upgrade. - Upgrade TiKV - First add a migration to the PD that corresponds to the scheduling of the region leader on TiKV, and ensure that the upgrade process does not affect the front-end business by migrating the leader - Wait for the migration leader to complete before updating the TiKV node - Wait for the updated TiKV to start normally before removing the migration leader's scheduling. 
- Upgrade other services - Normal out-of-service updates ### Upgrade operation The upgrade command parameters are as follows: ```bash'' Usage: tiup cluster upgrade [flags] Flags: --force forces escalation without transfer leader (dangerous operation) -h, --help help manual --transfer-timeout int transfer leader's timeout Global Flags: --ssh-timeout int SSH connection timeout -y, --yes Skip all confirmation steps. ```` For example, to upgrade a cluster to v4.0.0-rc, you need only one command: ```bash $ tiup cluster upgrade tidb-test v4.0.0-rc ```` ## Update configuration Sometimes we want to dynamically update the configuration of a component, tiup-cluster saves a copy of the current configuration for each cluster, and if we want to edit this configuration, we execute `tiup cluster edit-config `, for example: ```bash tiup cluster edit-config prod-cluster ```` The tiup-cluster then uses vi to open the configuration file for editing and save it after editing. The configuration is not applied to the cluster at this point, and if you want it to take effect, you need to execute: ```bash tiup cluster reload prod-cluster ```` This action sends the configuration to the target machine, restarts the cluster, and makes the configuration effective. ## Update components Regular upgrade clusters can use the upgrade command, but in some scenarios (e.g. 
Debug) it may be necessary to replace a running component with a temporary package, in which case you can use the patch command ```bash [user@localhost ~]# tiup cluster patch --help Replace the remote package with a specified package and restart the service Usage: tiup cluster patch [flags] Flags: -h, --help Help Information -N, --node strings specify the node to be replaced --overwrite uses the currently specified temporary package in future scale-out operations -R, -role strings Specify the type of service to be replaced --transfer-timeout int transfer leader's timeout Global Flags: --ssh-timeout int SSH connection timeout -y, --yes Skip all confirmation steps ``` For example, if there is a TiDB hotfix package in /tmp/tidb-hotfix.tar.gz, and we want to replace all TiDBs on the cluster, we can: ```bash tiup cluster patch test-cluster /tmp/tidb-hotfix.tar.gz -R tidb ``` Or just replace one of the TiDBs: ``` tiup cluster patch test-cluster /tmp/tidb-hotfix.tar.gz -N 172.16.4.5:4000 ``` ## Importing TiDB-Ansible clusters Before TiUP, clusters were generally deployed using TiDB-Ansible, and the import command was used to transition this part of the cluster to TiUP receivership. Use of the import command. 
```bash [user@localhost ~]# tiup cluster import --help Import an existing TiDB cluster from TiDB-Ansible Usage: tiup cluster import [flags] Flags: -d, --dir string TiDB-Ansible's directory, default is current directory -h, -help import help information --inventory string inventory file name (default is "event.ini") --no-backup does not backup Ansible directories, for Ansible directories with multiple inventory files -r, --rename NAME Rename the imported cluster Global Flags: --ssh-timeout int SSH connection timeout -y, --yes Skip all confirmation steps ``` Example: Importing a cluster: ```bash cd tidb-ansible tiup cluster import ``` perhaps ```bash tiup cluster import --dir=/path/to/tidb-ansible ``` tiup-1.16.3/doc/user/mirrors.md000066400000000000000000000120521505422223000163200ustar00rootroot00000000000000# Build a private mirror When building a private cloud, it is common to use an isolated network environment where the official mirror of TiUP is not accessible, so we provide a solution for building a private mirror, which is mainly implemented by the mirror component, which can also be used for offline deployment. ## Mirrors component introduction First, let's look at the `mirror' help file. ```bash $ tiup mirror --help The 'mirror' command is used to manage a component repository for TiUP, you can use it to create a private repository, or to add new component to an existing repository. The repository can be used either online or offline. It also provides some useful utilities to help managing keys, users and versions of components or the repository itself. 
Usage: tiup mirror [flags] Available Commands: init Initialize an empty repository sign Add signatures to a manifest file genkey Generate a new key pair clone Clone a local mirror from remote mirror and download all selected components merge Merge two or more offline mirror publish Publish a component show Show mirror address set Set mirror address modify Modify published component renew Renew the manifest of a published component. grant grant a new owner rotate Rotate root.json Global Flags: --help Help for this command --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string Use "tiup mirror [command] --help" for more information about a command. ``` Its basic use is `tiup mirror clone [global-version] [flags]`, the target-dir is the directory in which the cloned data needs to be placed. global-version is used to quickly set a common version for all components. Then this order has very scary dozens of flags and even more later. But there is no need to be intimidated by the number of these flags, which are in fact of four types. ### 1. specify whether to override local packages The `--overwrite` parameter means that if the specified already has a package that you want to download, you should overwrite it with the official image of the package, if this flag is set it will overwrite it. ### 2. Whether to clone in full quantity If `--full` is specified, the official image will be cloned intact. > **Note** > > If `--full` is not specified and no other flag is specified, then only some meta information will be cloned. ### 3. Platform limitation If you only want to clone packages for a particular platform, you can use `-os` and `-arch` to qualify: - `tiup mirror clone ---os=linux` - Just want to clone amd64 architecture: `tiup mirror clone --arch=amd64` - Just want to clone linux/amd64: `tiup mirror clone --os=linux --arch=amd64` ### 4. 
Component version limited If you want to clone only one version of a component and not all versions, use `--=` to qualify, for example " - Just want to clone the v4 version of tidb: `tiup mirror clone --tidb v4` - Just want to clone the v4 version of tidb, and all versions of tikv: `tiup mirror clone --tidb v4 --tikv all` - Clone specific versions of all components that start a cluster: `tiup mirror clone v4.0.0-rc` ## The real thing ### Offline installation For example, if we want to install a v4.0.0-rc TiDB cluster in an isolated environment, we can execute the following command on a machine connected to the extranet to pull the required components: ```bash tiup mirror package --os=linux v4.0.0-rc ``` This command creates a directory called `package` in the current directory that contains the package of components necessary to start a cluster, which is then packaged by the tar command and sent to a central control unit in an isolated environment: ```bash tar czvf package.tar.gz package ``` package.tar.gz is a standalone offline environment. After sending it to the target cluster's central controller, install TiUP with the following command: ```bash tar xzvf package.tar.gz cd package sh local_install.sh ``` After installing TiUP as prompted, deploy the TiDB cluster (assuming the working directory is still in the package): ```bash export TIUP_MIRRORS=/path/to/mirror tiup cluster xxx ``` `/path/to/mirror` is the location of in `tiup mirror clone `, or if in /tmp/package: ```bash export TIUP_MIRRORS=/tmp/package ``` For cluster operations, refer to the [cluster command](. /cluster.md). ### Private Mirror The way to build a private image is the same as for an offline installation package, just upload the contents of the package directory to a CDN or file server: ```bash cd package python -m SimpleHTTPServer 8000 ``` This creates a private image at the address http://127.0.0.1:8000. 
Installation of TiUP: ```bash export TIUP_MIRRORS=http://127.0.0.1:8000 curl $TIUP_MIRRORS/install.sh | sh ``` After importing the PATH variable, you can use TiUP normally (you need to keep the TIUP_MIRRORS variable pointing to a private image). tiup-1.16.3/doc/user/overview.md000066400000000000000000000151601505422223000164740ustar00rootroot00000000000000# TiUp manual In the installation and management of various system software and application software, the package manager has a wide range of applications, the emergence of package management tools greatly simplifies the installation and upgrade of software maintenance work. For example, almost all Linux using RPM uses Yum for package management, while Anaconda makes it very easy to manage the python environment and related packages. In the early TiDB ecosystem, there was no dedicated package management tool, and users could only manage it manually through the appropriate configuration files and folder names. Today, TiUP assumes the role of package manager as a new tool in the TiDB 4.0 ecosystem, managing numerous components (e.g. TiDB, PD, TiKV) within the TiDB ecosystem. When you want to run anything in the TiDB ecosystem, you just need to execute a single line of TiUP commands, which is much less difficult to manage than before. ## Installation As a minimalist, TiUP is also very simple to install, with one command for both Darwin and Linux. ```bash curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh ``` This command installs TiUP in the $HOME/.tiup folder, where the components installed and the data generated by their operation will also be placed. It also automatically adds $HOME/.tiup/bin to the PATH environment variable in the Shell Profile file, so you can use TiUP directly, for example, to see the version of TiUP. ```bash tiup --version ``` ## Key features The direct function of TiUP is to be a package manager in the TiDB ecosystem, but this is not its ultimate mission. 
Its vision is to lower the threshold of use of all tools in the TiDB ecosystem to the limit, which cannot be done by package management functions alone. In the TiUP world, any command can be followed by `--help` to get help information, such as the following command to get help information about TiUP itself: ```bash $ tiup --help TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system. You can run a specific version of a component via "tiup [:version]". If no version number is specified, the latest version installed locally will be used. If the specified component does not have any version installed locally, the latest stable version will be downloaded from the repository. Usage: tiup [flags] [args...] tiup [flags] [args...] Available Commands: install Install a specific version of a component list List the available TiDB components or versions uninstall Uninstall components or versions of a component update Update tiup components to the latest version status List the status of instantiated components clean Clean the data of instantiated components help Help about any command or component Available Components: playground Bootstrap a local TiDB cluster client A simple mysql client to connect TiDB package A toolbox to package tiup component cluster Deploy a TiDB cluster for production mirrors Build a local mirrors and download all selected components bench Benchmark database with different workloads doc Online document for TiDB Flags: -B, --binary [:version] Print binary path of a specific version of a component [:version] and the latest version installed will be selected if no version specified --binpath string Specify the binary path of component instance -h, --help help for tiup --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string -T, --tag string Specify a tag for component instance --version version for tiup Component instances with the 
same "tag" will share a data directory ($TIUP_HOME/data/$tag): $ tiup --tag mycluster playground Examples: $ tiup playground # Quick start $ tiup playground nightly # Start a playground with the latest nightly version $ tiup install [:version] # Install a component of specific version $ tiup update --all # Update all installed components to the latest version $ tiup update --nightly # Update all installed components to the nightly version $ tiup update --self # Update the "tiup" to the latest version $ tiup list # Fetch the latest supported components list $ tiup status # Display all running/terminated instances $ tiup clean # Clean the data of running/terminated instance (Kill process if it's running) $ tiup clean --all # Clean the data of all running/terminated instances Use "tiup [command] --help" for more information about a command. ``` The output of help information is long, but only two parts need to be focused on. - Available commands - install: for installing components - list: View the list of available components - uninstall: uninstall components - update: update component version - status: View component run history - clean: clears the component run log - Help: Output help information. - Available components - playground: Start cluster locally - client: cluster connected to the local machine - mirrors: cloning a private mirror from an official mirror - package: package a new component - bench: stress test the database - doc: Open online document > **Note** > > The number of available components will continue to increase, depending on the output of the `tiup cluster list`. The difference between a command and a component is that the command comes with TiUP and is used for package management operations, while the component is a separate package of components that TiUP installs through package management operations. 
For example, executing the command `tiup list` TiUP will run its own internal code directly, while executing the component `tiup playground` will first check if there is a local package called playground, if not, then download it from the image and run the package. [Package management](./package-manage.md) chapter will introduce all the commands and the components will be divided into chapters according to the different components: - [Local Rapid Deployment TiDB Cluster](./playground.md) - [Online cluster deployment and operation](./cluster.md) - [build private mirror](./mirrors.md) - [package command](./package.md) - [Bench marking](./bench.md) tiup-1.16.3/doc/user/package-manage.md000066400000000000000000000141121505422223000174430ustar00rootroot00000000000000# Packet management Component management is done primarily through TiUP's subcommands, which currently exist. - list: look up the list of components to know which ones are available for installation and which versions are available - install: Install a specific version of a component. - update: Upgrade a component to the latest version - uninstall: uninstall components - status: View component run status - clean: clean component example - help: Print the help information, followed by the command to print how to use it ## Query component list: tipped list When you want to install something with TiUP, you first need to know what components are available and what versions of those components are available, which is what the list command does. It supports several of these uses. - tiup list: See what components are currently available for installation - tiup list ${component}: See what versions of a component are available For the above two methods of use, two flags can be used in combination. 
--installed: which components are already installed locally, or which versions of a component are already installed Example 1: View all components currently installed ```shell tiup list --installed ``` Example 2: Get a list of all TiKV installable version components from the server ```shell tiup list tikv ``` ## Install component: tiup install After viewing the list of components, installation is also very simple, using the tiup install command, which is used as follows. - tiup install : Install the latest stable version of the specified component - tiup install :[version]: Install the specified version of the specified component Example 1: Installing the latest stable version of TiDB using TiUP ```shell tiup install tidb ``` Example 2: Installing the nightly version of TiDB with TiUP ```shell tiup install tidb:nightly ``` Example 3: Installing TiKV version v3.0.6 with TiUP ```shell tiup install tikv:v3.0.6 ``` ## Upgrade components After the new version of the official component is available, it is also possible to upgrade with TiUP, which is used in much the same way as install, except for a few additional flags: ---all: Upgrade all components --nightly: Upgrade to nightly version --self: Upgrade TiUP yourself to the latest version --force: mandatory upgrade to the latest version Example 1: Upgrade all components to the latest version ```shell tiup update --all ``` Example 2: Upgrade all components to the nightly version ```shell tiup update --all --nightly ``` Example 3: Upgrade TiUP to the latest version ```shell tiup update --self ``` ## Run component: tiup After the installation is complete, the appropriate components can be launched using TiUP. ```shell tiup [flags] [:version] [args...] ``` This command requires the name of a component and an optional version, or if no version is provided, the latest stable version of the component installed. Before the component starts, TiUP creates a directory for it and then puts the component into that directory to run. 
The component generates all the data in that directory, and the name of the directory is the tag name specified by the component at runtime. If no tag is specified, a tag name is generated at random and the working directory is *automatically deleted* upon instance termination. If we want to start the same component multiple times and reuse the previous working directory, we can specify the same name at startup with --tag. By specifying a tag, the working directory is *not automatically deleted* when the instance is terminated, making it easy to reuse at the next startup. Example 1: Running TiDB version v3.0.8 ```shell tiup tidb:v3.0.8 ``` Example 2: Specifying the tag to run TiKV ```shell tiup--tag=experiment tikv ``` ### Query component runtime status: tiup status The tiup status allows you to view the component's operational status, which is very simple to use. ```shell tiup status ``` Running this command will get a list of instances, one per line. The list contains these columns. - Name: tag name of the instance - Component: The component name of the instance. - PID: Process ID of the instance running - Status: Instance status, RUNNING means running, TERM means terminated - Created Time: Start time of the instance - Directory: the working directory of the instance, which can be specified by --tag - Binary: The executable of the instance, which can be specified by --binpath - Args: the running parameters of the instance ### Clean component example: tiup clean The component instances can be cleaned with tiup clean and the working directory can be removed. If the instance is still running before the cleanup, the associated process is killed first. The corresponding commands and parameters are as follows. ```bash tiup clean [tag] [tags] [flags] ``` The following flags are supported: --all Clear all instance information. 
where tag indicates the instance tag to be cleaned, and if `--all` is used, no tag is passed Example 1: Example of a component whose tag name is experiment ```shell tiup clean experiment ``` Example 2: Clean up all component examples {{< copyable "shell-regular">} ```shell tiup clean --all ``` ### Uninstall component: tiup uninstall TiUP supports uninstalling all or specific versions of a component, as well as uninstalling all components. The basic usage is as follows. ```bash tiup uninstall [component][:version] [flags] ``` Supported flags: --all uninstall all components or versions --self Uninstall TiUP itself component is the name of the component to be uninstalled, and version is the version to be uninstalled, both of which can be omitted, either of which needs to be added `--all` to use. - If the version is omitted, add `--all` to uninstall all versions of the component - If both versions and components are omitted, add `--all' to indicate that all components and all their versions are uninstalled Example 1: Uninstalling TiDB in v3.0.8 ```shell tiup uninstall tidb:v3.0.8 ``` Example 2: Uninstall all versions of TiKV ```shell tiup uninstall tikv --all ``` Example 3: Uninstall all installed components ```shell tiup uninstall --all ``` tiup-1.16.3/doc/user/package.md000066400000000000000000000064351505422223000162260ustar00rootroot00000000000000# Component packaging When you want to add a new component, or add a version of an existing component, you need to use tar to package the relevant file and then pass it to the mirror repository, using tar to package is not a difficult thing, the trouble is that you need to update the repository's meta-information, to avoid updating meta-information when corrupting the information of existing components. So the package component takes on this task. 
```bash [user@localhost ~]# tiup package --help Package a tiup component and generate package directory Usage: tiup package target [flags] Flags: -C, -- string Change directory before compress --arch string Target ARCH of the package (default "amd64") --desc string Description of the package --entry string Entry point of the package -h, --help help for tiup --hide tiup list Don't show the component in tiup list --name string Name of the package --os string Target OS of the package (default "darwin") --release string Version of the package --standalone Can the component run standalone ``` ## Hello World In this section we develop and package a hello component whose only function is to output the contents of its own configuration file, which is called "Hello World". For the sake of simplicity, we use the bash script to develop this component. 1. first create its configuration file, which contains only "Hello World": ```shell cat > config.txt << EOF Hello World EOF ``` 2. The executable is then created: ```shell cat > hello.sh << EOF #! /bin/sh cat \${TIUP_COMPONENT_INSTALL_DIR}/config.txt EOF chmod 755 hello.sh ``` The environment variable `TIUP_COMPONENT_INSTALL_DIR` is passed in by TiUP at runtime and points to the installation directory of the component. 3. Then refer to [Build Private Mirror] (. /mirrors.md) to build an offline or private mirror (mainly because the official mirror is not available to publish). /mirrors.md) to build an offline or private mirror (mainly because the official mirror is not available to publish its own package), and make sure the TIUP_MIRRORS variable points to the built mirror. 4. Wrapping: ```shell tiup package hello.sh config.txt --name=hello --entry=hello.sh --release=v0.0.1 ``` This step creates a package directory where the packaged files and meta information are placed. 5. 
Upload to the warehouse Since the official repository is not currently open for uploading, we can only upload to our own mirrors built in step 3, by copying all the files in the package directly to ${target-dir} in step 3 tiup mirrors. ```bash cp package/* path/to/mirror/ ``` If the directory created in step 3 happens to be in the current directory and is called package, then there is no need to manually copy it. 6. Implementation ```bash [user@localhost ~]# tiup list hello Available versions for hello (Last Modified: 2020-04-23T16:45:53+08:00): Version Installed Release: Platforms ------- --------- -------- --------- v0.0.1 2020-04-23T16:51:41+08:00 darwin/amd64 [user@localhost ~]# tiup hello The component `hello` is not installed; downloading from repository. Starting component `hello`: /Users/joshua/.tiup/components/hello/v0.0.1/hello.sh Hello World ``` tiup-1.16.3/doc/user/playground.md000066400000000000000000000131201505422223000170040ustar00rootroot00000000000000# Local Rapid Deployment TiDB Cluster TiDB clusters are distributed systems consisting of multiple components, and a typical TiDB cluster consists of at least 3 PD nodes, 3 TiKV nodes, and 2 TiDB nodes. Deploying so many components by hand can be time consuming and a headache for users and even TiDB developers who want to experience TiDB. In this section, we will introduce the playground component in TiUP and use this component to build a native TiDB test environment. ## Playground component introduction playground's basic usage: ```bash tiup playground [version] [flags] ``` The simplest startup command `tiup playground` will start a cluster of 1 KV, 1 DB, 1 PD using locally installed TiDB/TiKV/PD or their stable version. This order actually does the following. - Since no version is specified, TiUP looks for the latest version of the installed playground first, assuming the latest version is v0.0.6, which is equivalent to tiup playground:v0.0.6. 
- If the playground component has never had any version installed, TiUP will install the latest stable version before starting the runtime instance. - Since playground does not specify the version of each TiDB/PD/TiKV component, by default it will use the latest release version of each component, and assuming the current version is v4.0.0-rc, this command is equivalent to tiup playground:v0.0.6 v4.0.0-rc - Since playground also does not specify the number of components, by default it starts a minimized cluster of 1 TiDB, 1 TiKV and 1 PD - After starting each component in turn, the playground will tell you that it started successfully and tell you some useful information, such as how to connect the cluster through the MySQL client, how to access the dashboard The command line arguments for playground state: ```bash Usage: tiup playground [version] [flags] tiup [command] Available Commands: completion generate the autocompletion script for the specified shell display help Help about any command scale-in scale-out Flags: --db int TiDB instance number --db.host host Playground TiDB host. If not provided, TiDB will still use host flag as its host --db.port int Playground TiDB port. If not provided, TiDB will use 4000 as its port --db.binpath string TiDB instance binary path --db.config string TiDB instance configuration file --db.timeout int TiDB max wait time in seconds for starting, 0 means no limit --drainer int Drainer instance number --drainer.binpath string Drainer instance binary path --drainer.config string Drainer instance configuration file --grafana.port int grafana port. If not provided, grafana will use 3000 as its port. (default 3000) -h, --help help for tiup --host string Playground cluster host --kv int TiKV instance number --kv.binpath string TiKV instance binary path --kv.config string TiKV instance configuration file --mode string TiUP playground mode: 'tidb', 'tikv-slim' (default "tidb") --pd int PD instance number --pd.Host host Playground PD host. 
If not provided, PD will still use host flag as its host --pd.binpath string PD instance binary path --pd.config string PD instance configuration file --pump int Pump instance number --pump.binpath string Pump instance binary path --pump.config string Pump instance configuration file -T, --tag string Specify a tag for playground, data dir of this tag will not be removed after exit --ticdc int TiCDC instance number --ticdc.binpath string TiCDC instance binary path --ticdc.config string TiCDC instance configuration file --tiflash int TiFlash instance number --tiflash.binpath string TiFlash instance binary path --tiflash.config string TiFlash instance configuration file --tiflash.timeout int TiFlash max wait time in seconds for starting, 0 means no limit -v, --version version for tiup --without-monitor Don't start prometheus and grafana component ``` ## Example ### Start a TiDB Cluster with Daily Build ```shell tiup playground nightly ``` Nightly is the version number of this cluster, and similar ones can be `tiup playground v4.0.0-rc` etc. ### Start a cluster with or without monitoring. ```shell tiup playground nightly ``` This command launches Prometheus on port 9090 and Grafana on port 3000 for displaying timing data within the cluster. ```shell tiup playground nightly --without-monitor ``` This won't launch Prometheus or Grafana. This can be used to save resources. 
### Overrides the default configuration of the PD Copy PD's [configuration template](https://github.com/pingcap/pd/blob/master/conf/config.toml), modify some content, and then execute: ```shell tiup playground --pd.config ~/config/pd.toml ``` > Here the configuration is assumed to be placed at `~/config/pd.toml` ### Replace the default binary file If a temporary binary is compiled locally and you want to put it into a cluster for testing, you can use the flag --{comp}.binpath to replace it, for example, the TiDB binary: ```shell tiup playground --db.binpath /xx/tidb-server ``` ### Start multiple component instances By default, TiDB, TiKV and PD each start one, and if you want to start more than one, you can do this: ```shell tiup playground v3.0.10 --db 3 --pd 3 --kv 3 ``` tiup-1.16.3/docker/000077500000000000000000000000001505422223000140255ustar00rootroot00000000000000tiup-1.16.3/docker/.gitignore000066400000000000000000000001031505422223000160070ustar00rootroot00000000000000secret/.gitkeep !.gitkeep docker-compose.dm.yml docker-compose.yml tiup-1.16.3/docker/README.md000066400000000000000000000012121505422223000153000ustar00rootroot00000000000000# Dockerized tiup-cluster This docker image attempts to simplify the setup required by tiup-cluster. It is intended to be used by a CI tool or anyone with docker who wants to try tiup-cluster themselves. It contains all the tiup-cluster dependencies and code. It uses [Docker Compose](https://github.com/docker/compose) to spin up the five containers used by tiup-cluster. To start run ``` ./up.sh docker exec -it tiup-cluster-control bash ``` During development, it's convenient to run with `--dev` option, which mounts `$TIUP_CLUSTER_ROOT` dir as `/tiup-cluster` on tiup-cluster control container. Run `./up.sh --help` for more info. 
tiup-1.16.3/docker/control/000077500000000000000000000000001505422223000155055ustar00rootroot00000000000000tiup-1.16.3/docker/control/.gitignore000066400000000000000000000000231505422223000174700ustar00rootroot00000000000000tiops tiup-cluster tiup-1.16.3/docker/control/Dockerfile000066400000000000000000000010411505422223000174730ustar00rootroot00000000000000FROM golang:1.24 WORKDIR /tiup-cluster COPY . /tiup-cluster/ # tiup-cluster dependencies RUN apt-get -y -q update && \ apt-get install -qqy \ dos2unix \ default-mysql-client \ psmisc \ vim # not required by tiup-cluster itself, just for ease of use ADD bashrc /root/.bashrc ADD init.sh /init.sh RUN dos2unix /init.sh /root/.bashrc && \ chmod +x /init.sh && \ mkdir -p /root/.ssh && \ echo "Host *\n ServerAliveInterval 30\n ServerAliveCountMax 3" >> /root/.ssh/config CMD /init.sh tiup-1.16.3/docker/control/bashrc000066400000000000000000000015601505422223000166740ustar00rootroot00000000000000eval $(ssh-agent) &> /dev/null ssh-add /root/.ssh/id_rsa &> /dev/null export PATH=$PATH:/usr/local/go/bin export PATH=$PATH:/tiup-cluster/bin if [ -d "/mirrors" ]; then export TIUP_MIRRORS=/mirrors fi # You may uncomment the following lines if you want `ls' to be colorized: export LS_OPTIONS='--color=auto' eval "`dircolors`" alias ls='ls $LS_OPTIONS' alias ll='ls $LS_OPTIONS -l' alias l='ls $LS_OPTIONS -lA' mkdir -p ~/.tiup/bin [[ -f ~/.tiup/bin/root.json ]] || curl "https://tiup-mirrors.pingcap.com/root.json" > ~/.tiup/bin/root.json cat < ~/.ssh/id_rsa chmod 600 ~/.ssh/id_rsa echo $SSH_PUBLIC_KEY > ~/.ssh/id_rsa.pub echo > ~/.ssh/known_hosts for f in $(seq 1 5);do ssh-keyscan -t rsa n$f >> ~/.ssh/known_hosts done fi # TODO: assert that SSH_PRIVATE_KEY==~/.ssh/id_rsa cat < Found authorized keys" mkdir -p /root/.ssh chmod 700 /root/.ssh touch /root/.ssh/authorized_keys chmod 600 /root/.ssh/authorized_keys IFS=$'\n' arr=$(echo ${AUTHORIZED_KEYS} | tr "," "\n") for x in $arr do x=$(echo $x |sed -e 's/^ *//' -e 's/ *$//') cat 
/root/.ssh/authorized_keys | grep "$x" >/dev/null 2>&1 if [ $? -ne 0 ]; then echo "=> Adding public key to /root/.ssh/authorized_keys: $x" echo "$x" >> /root/.ssh/authorized_keys fi done fi # exec /usr/sbin/sshd -D exec /bin/systemd tiup-1.16.3/docker/secret/000077500000000000000000000000001505422223000153125ustar00rootroot00000000000000tiup-1.16.3/docker/secret/.gitkeep000066400000000000000000000000001505422223000167310ustar00rootroot00000000000000tiup-1.16.3/docker/up.sh000077500000000000000000000156761505422223000150270ustar00rootroot00000000000000#!/usr/bin/env bash # "To provide additional docker-compose args, set the COMPOSE var. Ex: # COMPOSE="-f FILE_PATH_HERE" set -o errexit set -o pipefail set -o nounset set -o xtrace ERROR() { echo -e "\e[101m\e[97m[ERROR]\e[49m\e[39m" "$@" } WARNING() { echo -e "\e[101m\e[97m[WARNING]\e[49m\e[39m" "$@" } INFO() { echo -e "\e[104m\e[97m[INFO]\e[49m\e[39m" "$@" } exists() { type "$1" > /dev/null 2>&1 } # Change directory to the source directory of this script. 
Taken from: # https://stackoverflow.com/a/246128/3858681 pushd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" HELP=0 INIT_ONLY=0 COMPOSE=${COMPOSE:-""} SUBNET=${SUBNET:-"172.19.0.0/24"} PROXY_SUBNET=${PROXY_SUBNET:-"172.19.1.0/24"} NODES=${NODES:-5} RUN_AS_DAEMON=0 INCLUDE_PROXY_NODES=0 POSITIONAL=() while [[ $# -gt 0 ]] do key="$1" case $key in -h|--help) HELP=1 shift # past argument ;; --init-only) INIT_ONLY=1 shift # past argument ;; --compose) COMPOSE="-f $2" shift # past argument shift # past value ;; --subnet) SUBNET="$2" shift # past argument shift # past value ;; -n|--nodes) NODES="$2" shift # past argument shift # past value ;; -d|--daemon) INFO "Running docker-compose as daemon" RUN_AS_DAEMON=1 shift # past argument ;; --ssh-proxy) INFO "Include SSH proxy nodes" INCLUDE_PROXY_NODES=1 shift # past argument ;; --proxy-subnet) PROXY_SUBNET="$2" shift # past argument shift # past value ;; *) POSITIONAL+=("$1") ERROR "unknown option $1" shift # past argument ;; esac done # comment because ERROR: # ./up.sh: line 79: POSITIONAL[@]: unbound variable] # set -- "${POSITIONAL[@]}" # restore positional parameters if [ "${HELP}" -eq 1 ]; then echo "Usage: $0 [OPTION]" echo " --help Display this message" echo " --init-only Initializes ssh-keys, but does not call docker-compose" echo " --daemon Runs docker-compose in the background" echo " --compose PATH Path to an additional docker-compose yml config." echo " --subnet SUBNET Subnet in 24 bit netmask" echo " --nodes NODES Start how much nodes" echo " --ssh-proxy Start with ssh proxy nodes" echo " --proxy-subnet PROXY_SUBNET Proxy subnet in 24 bit netmask" echo "To provide multiple additional docker-compose args, set the COMPOSE var directly, with the -f flag. 
Ex: COMPOSE=\"-f FILE_PATH_HERE -f ANOTHER_PATH\" ./up.sh'" exit 0 fi exists ssh-keygen || { ERROR "Please install ssh-keygen (apt-get install openssh-client)"; exit 1; } exists perl || { ERROR "Please install perl (apt-get install perl)"; exit 1; } # Generate SSH keys for the control node if [ ! -f ./secret/node.env ]; then INFO "Generating key pair" mkdir -p secret ssh-keygen -t rsa -N "" -f ./secret/id_rsa INFO "Generating ./secret/control.env" { echo "# generated by tiup-cluster/docker/up.sh, parsed by tiup-cluster/docker/control/bashrc"; echo "# NOTE: newline is expressed as ↩"; echo "SSH_PRIVATE_KEY=$(perl -p -e "s/\n/↩/g" < ./secret/id_rsa)"; echo "SSH_PUBLIC_KEY=$(cat ./secret/id_rsa.pub)"; } >> ./secret/control.env INFO "Generating ./secret/node.env" { echo "# generated by tiup-cluster/docker/up.sh, parsed by the \"tutum/debian\" docker image entrypoint script"; echo "ROOT_PASS=root"; echo "AUTHORIZED_KEYS=$(cat ./secret/id_rsa.pub)"; } >> ./secret/node.env else INFO "No need to generate key pair" fi # Make sure folders referenced in control Dockerfile exist and don't contain leftover files rm -rf ./control/tiup-cluster mkdir -p ./control/tiup-cluster/tiup-cluster if [ "${INIT_ONLY}" -eq 1 ]; then exit 0 fi if [ "${SUBNET##*/}" -ne 24 ]; then ERROR "Only subnet mask of 24 bits are currently supported" exit 1 fi if [ "$NODES" -gt "64" ]; then ERROR "At most 64 nodes is supported" exit 1 fi exists python || { ERROR "Please install python (https://www.python.org/downloads/)"; exit 1; } exists docker || { ERROR "Please install docker (https://docs.docker.com/engine/installation/)"; exit 1; } exists pip || { ERROR "Please install pip (https://docs.docker.com/engine/installation/)"; exit 1; } pip3 install -U jinja2 exist_network=$(docker network ls | awk '{if($2 == "tiops") print $1}') if [[ "$exist_network" == "" ]]; then docker network create --gateway "${SUBNET%.*}.1" --subnet "${SUBNET}" tiops else echo "Skip create tiup-cluster network" SUBNET=$(docker 
network inspect -f "{{range .IPAM.Config}}{{.Subnet}}{{end}}" tiops) fi if [[ "${SUBNET##*/}" -ne 24 ]]; then ERROR "Only subnet mask of 24 bits are currently supported" exit 1 fi ipprefix=${SUBNET%.*} ssh_proxy="False" proxy_prefix="" if [[ "${INCLUDE_PROXY_NODES}" -eq 1 ]]; then ssh_proxy="True" exist_network=$(docker network ls | awk '{if($2 == "tiproxy") print $1}') if [[ "$exist_network" == "" ]]; then docker network create --gateway "${PROXY_SUBNET%.*}.1" --subnet "${PROXY_SUBNET}" tiproxy else echo "Skip create tiup-cluster proxy network" SUBNET=$(docker network inspect -f "{{range .IPAM.Config}}{{.Subnet}}{{end}}" tiproxy) fi if [[ "${PROXY_SUBNET##*/}" -ne 24 ]]; then ERROR "Only proxy-subnet mask of 24 bits are currently supported" exit 1 fi proxy_prefix=${PROXY_SUBNET%.*} fi python -c "from jinja2 import Template; print(Template(open('docker-compose.yml.tpl').read()).render(nodes=$NODES, ipprefix='$ipprefix', ssh_proxy=$ssh_proxy, proxy_prefix='$proxy_prefix'))" > docker-compose.yml sed "s/__IPPREFIX__/$ipprefix/g" docker-compose.dm.yml.tpl > docker-compose.dm.yml sed -i '/TIUP_TEST_IP_PREFIX/d' ./secret/control.env echo "TIUP_TEST_IP_PREFIX=$ipprefix" >> ./secret/control.env INFO "Running \`docker-compose build\`" cp -a ${TIUP_CLUSTER_ROOT}/* ./control/ || true docker compose -f docker-compose.yml ${COMPOSE} build INFO "Running \`docker-compose up\`" if [ "${RUN_AS_DAEMON}" -eq 1 ]; then # shellcheck disable=SC2086 docker compose -f docker-compose.yml ${COMPOSE} up -d INFO "All containers started, run \`docker ps\` to view" else INFO "Please run \`docker exec -it tiup-cluster-control bash\` in another terminal to proceed" # shellcheck disable=SC2086 docker compose -f docker-compose.yml ${COMPOSE} up fi popd tiup-1.16.3/embed/000077500000000000000000000000001505422223000136325ustar00rootroot00000000000000tiup-1.16.3/embed/embed.go000066400000000000000000000006211505422223000152340ustar00rootroot00000000000000package embed import ( goembed "embed" ) 
//go:embed templates var embededFiles goembed.FS // ReadTemplate read the template file embed. func ReadTemplate(path string) ([]byte, error) { return embededFiles.ReadFile(path) } //go:embed examples var embedExamples goembed.FS // ReadExample read an example file func ReadExample(path string) ([]byte, error) { return embedExamples.ReadFile(path) } tiup-1.16.3/embed/embed_test.go000066400000000000000000000024241505422223000162760ustar00rootroot00000000000000package embed import ( "os" "path/filepath" "testing" "github.com/stretchr/testify/require" ) func getAllFilePaths(dir string) (paths []string, err error) { err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if path == dir { return nil } if info.IsDir() { subPaths, err := getAllFilePaths(path) if err != nil { return err } paths = append(paths, subPaths...) } else { paths = append(paths, path) } return nil }) return } // Test can read all file in /templates func TestCanReadTemplates(t *testing.T) { paths, err := getAllFilePaths("templates") require.Nil(t, err) require.Greater(t, len(paths), 0) for _, path := range paths { t.Log("check file: ", path) data, err := os.ReadFile(path) require.Nil(t, err) embedData, err := ReadTemplate(path) require.Nil(t, err) require.Equal(t, embedData, data) } } // Test can read all file in /examples func TestCanReadExamples(t *testing.T) { paths, err := getAllFilePaths("examples") require.Nil(t, err) require.Greater(t, len(paths), 0) for _, path := range paths { t.Log("check file: ", path) data, err := os.ReadFile(path) require.Nil(t, err) embedData, err := ReadExample(path) require.Nil(t, err) require.Equal(t, embedData, data) } } 
tiup-1.16.3/embed/examples/000077500000000000000000000000001505422223000154505ustar00rootroot00000000000000tiup-1.16.3/embed/examples/cluster/000077500000000000000000000000001505422223000171315ustar00rootroot00000000000000tiup-1.16.3/embed/examples/cluster/local.tpl000066400000000000000000000036221505422223000207470ustar00rootroot00000000000000# For more information about the format of the tiup cluster topology file, consult # https://docs.pingcap.com/tidb/stable/production-deployment-using-tiup#step-3-initialize-cluster-topology-file # # Global variables are applied to all deployments and used as the default value of # # the deployments if a specific deployment value is missing. global: # # The OS user who runs the tidb cluster. user: "{{ .GlobalUser }}" {{- if .GlobalGroup }} # group is used to specify the group name the user belong to if it's not the same as user. group: "{{ .GlobalGroup }}" {{- end }} {{- if .GlobalSystemdMode }} # # systemd_mode is used to select whether to use sudo permissions. systemd_mode: "{{ .GlobalSystemdMode }}" {{- end }} # # SSH port of servers in the managed cluster. ssh_port: {{ .GlobalSSHPort }} # # Storage directory for cluster deployment files, startup scripts, and configuration files. deploy_dir: "{{ .GlobalDeployDir }}" # # TiDB Cluster data storage directory data_dir: "{{ .GlobalDataDir }}" {{- if .GlobalArch }} # # Supported values: "amd64", "arm64" (default: "amd64") arch: "{{ .GlobalArch }}" {{- end }} {{ if .TiDBServers -}} pd_servers: {{- range .PDServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .TiDBServers -}} tidb_servers: {{- range .TiDBServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .TiKVServers -}} tikv_servers: {{- range .TiKVServers }} - host: {{ . }} {{- end }} {{ end }} {{- if .TiFlashServers }} tiflash_servers: {{- range .TiFlashServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .MonitoringServers -}} monitoring_servers: {{- range .MonitoringServers }} - host: {{ . 
}} {{- end }} {{ end }} {{ if .GrafanaServers -}} grafana_servers: {{- range .GrafanaServers }} - host: {{ . }} {{- end }} {{ end }} {{- if .AlertManagerServers }} alertmanager_servers: {{- range .AlertManagerServers }} - host: {{ . }} {{- end }} {{ end }} tiup-1.16.3/embed/examples/cluster/minimal.yaml000066400000000000000000000255571505422223000214610ustar00rootroot00000000000000# # Global variables are applied to all deployments and used as the default value of # # the deployments if a specific deployment value is missing. global: # # The user who runs the tidb cluster. user: "tidb" # # group is used to specify the group name the user belong to if it's not the same as user. # group: "tidb" # # systemd_mode is used to select whether to use sudo permissions. When its value is set to user, there is no need to add global.user to sudoers. The default value is system. # systemd_mode: "system" # # SSH port of servers in the managed cluster. ssh_port: 22 # # Storage directory for cluster deployment files, startup scripts, and configuration files. deploy_dir: "/tidb-deploy" # # TiDB Cluster data storage directory data_dir: "/tidb-data" # # default listen_host for all components listen_host: 0.0.0.0 # # Supported values: "amd64", "arm64" (default: "amd64") arch: "amd64" # # Resource Control is used to limit the resource of an instance. # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html # # Supports using instance-level `resource_control` to override global `resource_control`. # resource_control: # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#MemoryLimit=bytes # memory_limit: "2G" # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#CPUQuota= # # The percentage specifies how much CPU time the unit shall get at maximum, relative to the total CPU time available on one CPU. Use values > 100% for allotting CPU time on more than one CPU. 
# # Example: CPUQuota=200% ensures that the executed processes will never get more than two CPU time. # cpu_quota: "200%" # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#IOReadBandwidthMax=device%20bytes # io_read_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" # io_write_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" # # Monitored variables are applied to all the machines. monitored: # # The communication port for reporting system information of each node in the TiDB cluster. node_exporter_port: 9100 # # Blackbox_exporter communication port, used for TiDB cluster port monitoring. blackbox_exporter_port: 9115 # # Storage directory for deployment files, startup scripts, and configuration files of monitoring components. # deploy_dir: "/tidb-deploy/monitored-9100" # # Data storage directory of monitoring components. # data_dir: "/tidb-data/monitored-9100" # # Log storage directory of the monitoring component. # log_dir: "/tidb-deploy/monitored-9100/log" # # Server configs are used to specify the runtime configuration of TiDB components. # # All configuration items can be found in TiDB docs: # # - TiDB: https://docs.pingcap.com/tidb/stable/tidb-configuration-file # # - TiKV: https://docs.pingcap.com/tidb/stable/tikv-configuration-file # # - PD: https://docs.pingcap.com/tidb/stable/pd-configuration-file # # - TiFlash: https://docs.pingcap.com/tidb/stable/tiflash-configuration # # # # All configuration items use points to represent the hierarchy, e.g: # # readpool.storage.use-unified-pool # # ^ ^ # # - example: https://github.com/pingcap/tiup/blob/master/examples/topology.example.yaml. # # You can overwrite this configuration via the instance-level `config` field. # server_configs: # tidb: # tikv: # pd: # tiflash: # tiflash-learner: # # Server configs are used to specify the configuration of PD Servers. pd_servers: # # The ip address of the PD Server. 
- host: 10.0.1.11 # # SSH port of the server. # ssh_port: 22 # # PD Server name # name: "pd-1" # # communication port for TiDB Servers to connect. # client_port: 2379 # # Communication port among PD Server nodes. # peer_port: 2380 # # PD Server deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/pd-2379" # # PD Server data storage directory. # data_dir: "/tidb-data/pd-2379" # # PD Server log file storage directory. # log_dir: "/tidb-deploy/pd-2379/log" # # numa node bindings. # numa_node: "0,1" # # The following configs are used to overwrite the `server_configs.pd` values. # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 - host: 10.0.1.12 # ssh_port: 22 # name: "pd-1" # client_port: 2379 # peer_port: 2380 # deploy_dir: "/tidb-deploy/pd-2379" # data_dir: "/tidb-data/pd-2379" # log_dir: "/tidb-deploy/pd-2379/log" # numa_node: "0,1" # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 - host: 10.0.1.13 # ssh_port: 22 # name: "pd-1" # client_port: 2379 # peer_port: 2380 # deploy_dir: "/tidb-deploy/pd-2379" # data_dir: "/tidb-data/pd-2379" # log_dir: "/tidb-deploy/pd-2379/log" # numa_node: "0,1" # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 # # Server configs are used to specify the configuration of TiDB Servers. tidb_servers: # # The ip address of the TiDB Server. - host: 10.0.1.14 # # SSH port of the server. # ssh_port: 22 # # The port for clients to access the TiDB cluster. # port: 4000 # # TiDB Server status API port. # status_port: 10080 # # TiDB Server deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/tidb-4000" # # TiDB Server log file storage directory. # log_dir: "/tidb-deploy/tidb-4000/log" # # The ip address of the TiDB Server. 
- host: 10.0.1.15 # ssh_port: 22 # port: 4000 # status_port: 10080 # deploy_dir: "/tidb-deploy/tidb-4000" # log_dir: "/tidb-deploy/tidb-4000/log" - host: 10.0.1.16 # ssh_port: 22 # port: 4000 # status_port: 10080 # deploy_dir: "/tidb-deploy/tidb-4000" # log_dir: "/tidb-deploy/tidb-4000/log" # # Server configs are used to specify the configuration of TiKV Servers. tikv_servers: # # The ip address of the TiKV Server. - host: 10.0.1.17 # # SSH port of the server. # ssh_port: 22 # # TiKV Server communication port. # port: 20160 # # TiKV Server status API port. # status_port: 20180 # # TiKV Server deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/tikv-20160" # # TiKV Server data storage directory. # data_dir: "/tidb-data/tikv-20160" # # TiKV Server log file storage directory. # log_dir: "/tidb-deploy/tikv-20160/log" # # The following configs are used to overwrite the `server_configs.tikv` values. # config: # log.level: warn # # The ip address of the TiKV Server. - host: 10.0.1.18 # ssh_port: 22 # port: 20160 # status_port: 20180 # deploy_dir: "/tidb-deploy/tikv-20160" # data_dir: "/tidb-data/tikv-20160" # log_dir: "/tidb-deploy/tikv-20160/log" # config: # log.level: warn - host: 10.0.1.19 # ssh_port: 22 # port: 20160 # status_port: 20180 # deploy_dir: "/tidb-deploy/tikv-20160" # data_dir: "/tidb-data/tikv-20160" # log_dir: "/tidb-deploy/tikv-20160/log" # config: # log.level: warn # # Server configs are used to specify the configuration of TiFlash Servers. tiflash_servers: # # The ip address of the TiFlash Server. - host: 10.0.1.20 # # SSH port of the server. # ssh_port: 22 # # TiFlash TCP Service port. # # Since 7.1.0, it is not actually listened, and only being used as part of the instance identity. # tcp_port: 9000 # # TiFlash raft service and coprocessor service listening address. # flash_service_port: 3930 # # TiFlash Proxy service port. # flash_proxy_port: 20170 # # TiFlash Proxy metrics port. 
# flash_proxy_status_port: 20292 # # TiFlash metrics port. # metrics_port: 8234 # # TiFlash Server deployment file, startup script, configuration file storage directory. # deploy_dir: /tidb-deploy/tiflash-9000 ## With cluster version >= v4.0.9 and you want to deploy a multi-disk TiFlash node, it is recommended to ## check config.storage.* for details. The data_dir will be ignored if you defined those configurations. ## Setting data_dir to a ','-joined string is still supported but deprecated. ## Check https://docs.pingcap.com/tidb/stable/tiflash-configuration#multi-disk-deployment for more details. # # TiFlash Server data storage directory. # data_dir: /tidb-data/tiflash-9000 # # TiFlash Server log file storage directory. # log_dir: /tidb-deploy/tiflash-9000/log # # The following configs are used to overwrite the `server_configs.tiflash` values. # config: # logger.level: info # # The following configs are used to overwrite the `server_configs.tiflash-learner` values. # learner_config: # log.level: info - host: 10.0.1.21 # ssh_port: 22 # tcp_port: 9000 # flash_service_port: 3930 # flash_proxy_port: 20170 # flash_proxy_status_port: 20292 # metrics_port: 8234 # deploy_dir: /tidb-deploy/tiflash-9000 # data_dir: /tidb-data/tiflash-9000 # log_dir: /tidb-deploy/tiflash-9000/log # # Server configs are used to specify the configuration of Prometheus Server. monitoring_servers: # # The ip address of the Monitoring Server. - host: 10.0.1.22 # # SSH port of the server. # ssh_port: 22 # # Prometheus Service communication port. # port: 9090 # # ng-monitoring servive communication port # ng_port: 12020 # # Prometheus deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/prometheus-8249" # # Prometheus data storage directory. # data_dir: "/tidb-data/prometheus-8249" # # Prometheus log file storage directory. # log_dir: "/tidb-deploy/prometheus-8249/log" # # Server configs are used to specify the configuration of Grafana Servers. 
grafana_servers: # # The ip address of the Grafana Server. - host: 10.0.1.22 # # Grafana web port (browser access) # port: 3000 # # Grafana deployment file, startup script, configuration file storage directory. # deploy_dir: /tidb-deploy/grafana-3000 # # Server configs are used to specify the configuration of Alertmanager Servers. alertmanager_servers: # # The ip address of the Alertmanager Server. - host: 10.0.1.22 # # SSH port of the server. # ssh_port: 22 # # Alertmanager web service port. # web_port: 9093 # # Alertmanager communication port. # cluster_port: 9094 # # Alertmanager deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/alertmanager-9093" # # Alertmanager data storage directory. # data_dir: "/tidb-data/alertmanager-9093" # # Alertmanager log file storage directory. # log_dir: "/tidb-deploy/alertmanager-9093/log" tiup-1.16.3/embed/examples/cluster/multi-dc.yaml000066400000000000000000000316501505422223000215400ustar00rootroot00000000000000# # Global variables are applied to all deployments and used as the default value of # # the deployments if a specific deployment value is missing. global: # # The user who runs the tidb cluster. user: "tidb" # # group is used to specify the group name the user belong to,if it's not the same as user. # group: "tidb" # # SSH port of servers in the managed cluster. # # systemd_mode is used to select whether to use sudo permissions. When its value is set to user, there is no need to add global.user to sudoers. The default value is system. # systemd_mode: "system" ssh_port: 22 # # Storage directory for cluster deployment files, startup scripts, and configuration files. deploy_dir: "/tidb-deploy" # # TiDB Cluster data storage directory data_dir: "/tidb-data" # # default listen_host for all components listen_host: 0.0.0.0 # # Supported values: "amd64", "arm64" (default: "amd64") arch: "amd64" # # Resource Control is used to limit the resource of an instance. 
# # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html # # Supports using instance-level `resource_control` to override global `resource_control`. # resource_control: # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#MemoryLimit=bytes # memory_limit: "2G" # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#CPUQuota= # # The percentage specifies how much CPU time the unit shall get at maximum, relative to the total CPU time available on one CPU. Use values > 100% for allotting CPU time on more than one CPU. # # Example: CPUQuota=200% ensures that the executed processes will never get more than two CPU time. # cpu_quota: "200%" # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#IOReadBandwidthMax=device%20bytes # io_read_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" # io_write_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" # # Monitored variables are applied to all the machines. monitored: # # The communication port for reporting system information of each node in the TiDB cluster. node_exporter_port: 9100 # # Blackbox_exporter communication port, used for TiDB cluster port monitoring. blackbox_exporter_port: 9115 # # Storage directory for deployment files, startup scripts, and configuration files of monitoring components. # deploy_dir: "/tidb-deploy/monitored-9100" # # Data storage directory of monitoring components. # data_dir: "/tidb-data/monitored-9100" # # Log storage directory of the monitoring component. # log_dir: "/tidb-deploy/monitored-9100/log" # # Server configs are used to specify the runtime configuration of TiDB components. 
# # All configuration items can be found in TiDB docs: # # - TiDB: https://pingcap.com/docs/stable/reference/configuration/tidb-server/configuration-file/ # # - TiKV: https://pingcap.com/docs/stable/reference/configuration/tikv-server/configuration-file/ # # - PD: https://pingcap.com/docs/stable/reference/configuration/pd-server/configuration-file/ # # - TiFlash: https://docs.pingcap.com/tidb/stable/tiflash-configuration # # # # All configuration items use points to represent the hierarchy, e.g: # # readpool.storage.use-unified-pool # # ^ ^ # # - example: https://github.com/pingcap/tiup/blob/master/examples/topology.example.yaml. # # You can overwrite this configuration via the instance-level `config` field. server_configs: # tidb: tikv: # # Compression algorithm for gRPC messages server.grpc-compression-type: gzip # tiflash: # tiflash-learner: pd: # # Label level settings. replication.location-labels: ["zone","dc","rack","host"] # # Number of replicas of TiKV data replication.max-replicas: 5 # # Prohibit the leader be scheduled to the specified label place. label-property: reject-leader: - key: "dc" value: "sha" # # Server configs are used to specify the configuration of PD Servers. pd_servers: # # The ip address of the PD Server. - host: 10.0.1.11 # # SSH port of the server. # ssh_port: 22 # # PD Server name # name: "pd-1" # # communication port for TiDB Servers to connect. # client_port: 2379 # # communication port among PD Server nodes. # peer_port: 2380 # # PD Server deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/pd-2379" # # PD Server data storage directory. # data_dir: "/tidb-data/pd-2379" # # PD Server log file storage directory. # log_dir: "/tidb-deploy/pd-2379/log" # # numa node bindings. # numa_node: "0,1" # # The following configs are used to overwrite the `server_configs.pd` values. 
# config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 - host: 10.0.1.13 # ssh_port: 22 # name: "pd-1" # client_port: 2379 # peer_port: 2380 # deploy_dir: "/tidb-deploy/pd-2379" # data_dir: "/tidb-data/pd-2379" # log_dir: "/tidb-deploy/pd-2379/log" # numa_node: "0,1" # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 - host: 10.0.1.15 # ssh_port: 22 # name: "pd-1" # client_port: 2379 # peer_port: 2380 # deploy_dir: "/tidb-deploy/pd-2379" # data_dir: "/tidb-data/pd-2379" # log_dir: "/tidb-deploy/pd-2379/log" # numa_node: "0,1" # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 # # Server configs are used to specify the configuration of TiDB Servers. tidb_servers: # # The ip address of the TiDB Server. - host: 10.0.1.16 # # SSH port of the server. # ssh_port: 22 # # Access the TiDB cluster port. # port: 4000 # # TiDB Server status information reporting port. # status_port: 10080 # # TiDB Server deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/tidb-4000" # # TiDB Server log file storage directory. # log_dir: "/tidb-deploy/tidb-4000/log" # # The ip address of the TiDB Server. - host: 10.0.1.17 # ssh_port: 22 # port: 4000 # status_port: 10080 # deploy_dir: "/tidb-deploy/tidb-4000" # log_dir: "/tidb-deploy/tidb-4000/log" - host: 10.0.1.18 # ssh_port: 22 # port: 4000 # status_port: 10080 # deploy_dir: "/tidb-deploy/tidb-4000" # log_dir: "/tidb-deploy/tidb-4000/log" # # Server configs are used to specify the configuration of TiKV Servers. tikv_servers: # # The ip address of the TiKV Server. - host: 10.0.1.19 # # SSH port of the server. # ssh_port: 22 # # TiKV Server communication port. # port: 20160 # # Communication port for reporting TiKV Server status. # status_port: 20180 # # TiKV Server deployment file, startup script, configuration file storage directory. 
# deploy_dir: "/tidb-deploy/tikv-20160" # # TiKV Server data storage directory. # data_dir: "/tidb-data/tikv-20160" # # TiKV Server log file storage directory. # log_dir: "/tidb-deploy/tikv-20160/log" # # The following configs are used to overwrite the `server_configs.tikv` values. config: server.labels: zone: bj dc: bja rack: rack1 host: host1 # # The ip address of the TiKV Server. - host: 10.0.1.20 # ssh_port: 22 # port: 20160 # status_port: 20180 # deploy_dir: "/tidb-deploy/tikv-20160" # data_dir: "/tidb-data/tikv-20160" # log_dir: "/tidb-deploy/tikv-20160/log" config: server.labels: zone: bj dc: bja rack: rack1 host: host2 - host: 10.0.1.21 # ssh_port: 22 # port: 20160 # status_port: 20180 # deploy_dir: "/tidb-deploy/tikv-20160" # data_dir: "/tidb-data/tikv-20160" # log_dir: "/tidb-deploy/tikv-20160/log" config: server.labels: zone: bj dc: bjb rack: rack1 host: host1 - host: 10.0.1.22 # ssh_port: 22 # port: 20160 # status_port: 20180 # deploy_dir: "/tidb-deploy/tikv-20160" # data_dir: "/tidb-data/tikv-20160" # log_dir: "/tidb-deploy/tikv-20160/log" config: server.labels: zone: bj dc: bjb rack: rack1 host: host2 - host: 10.0.1.23 # ssh_port: 22 # port: 20160 # status_port: 20180 # deploy_dir: "/tidb-deploy/tikv-20160" # data_dir: "/tidb-data/tikv-20160" # log_dir: "/tidb-deploy/tikv-20160/log" config: server.labels: zone: sh dc: sha rack: rack1 host: host1 # # The minimum number of ticks during which the Raft election is initiated. raftstore.raft-min-election-timeout-ticks: 1000 # # The maximum number of ticks during which the Raft election is initiated. raftstore.raft-max-election-timeout-ticks: 1020 # # Server configs are used to specify the configuration of TiFlash Servers. tiflash_servers: # # The ip address of the TiFlash Server. - host: 10.0.1.24 # # SSH port of the server. # ssh_port: 22 # # TiFlash TCP Service port. # # Since 7.1.0, it is not actually listened, and only being used as part of the instance identity. 
# tcp_port: 9000 # # TiFlash raft service and coprocessor service listening address. # flash_service_port: 3930 # # TiFlash Proxy service port. # flash_proxy_port: 20170 # # Prometheus pulls TiFlash Proxy metrics port. # flash_proxy_status_port: 20292 # # Prometheus pulls the TiFlash metrics port. # metrics_port: 8234 # # TiFlash Server deployment file, startup script, configuration file storage directory. # deploy_dir: /tidb-deploy/tiflash-9000 ## With cluster version >= v4.0.9 and you want to deploy a multi-disk TiFlash node, it is recommended to ## check config.storage.* for details. The data_dir will be ignored if you defined those configurations. ## Setting data_dir to a ','-joined string is still supported but deprecated. ## Check https://docs.pingcap.com/tidb/stable/tiflash-configuration#multi-disk-deployment for more details. # # TiFlash Server data storage directory. # data_dir: /tidb-data/tiflash-9000 # # TiFlash Server log file storage directory. # log_dir: /tidb-deploy/tiflash-9000/log # # The following configs are used to overwrite the `server_configs.tiflash` values. # config: # logger.level: info # # The following configs are used to overwrite the `server_configs.tiflash-learner` values. learner_config: server.labels: zone: bj dc: bja rack: rack1 host: tiflash-host1 - host: 10.0.1.25 # ssh_port: 22 # tcp_port: 9000 # flash_service_port: 3930 # flash_proxy_port: 20170 # flash_proxy_status_port: 20292 # metrics_port: 8234 # deploy_dir: /tidb-deploy/tiflash-9000 # data_dir: /tidb-data/tiflash-9000 # log_dir: /tidb-deploy/tiflash-9000/log # # The following configs are used to overwrite the `server_configs.tiflash` values. # config: # logger.level: info # # The following configs are used to overwrite the `server_configs.tiflash-learner` values. learner_config: server.labels: zone: bj dc: bjb rack: rack1 host: tiflash-host2 # # Server configs are used to specify the configuration of Prometheus Server. 
monitoring_servers: # # The ip address of the Monitoring Server. - host: 10.0.1.26 # # SSH port of the server. # ssh_port: 22 # # Prometheus Service communication port. # port: 9090 # # ng-monitoring servive communication port # ng_port: 12020 # # Prometheus deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/prometheus-8249" # # Prometheus data storage directory. # data_dir: "/tidb-data/prometheus-8249" # # Prometheus log file storage directory. # log_dir: "/tidb-deploy/prometheus-8249/log" # # Server configs are used to specify the configuration of Grafana Servers. grafana_servers: # # The ip address of the Grafana Server. - host: 10.0.1.26 # # Grafana Web monitoring service client (browser) access port # port: 3000 # # Grafana deployment file, startup script, configuration file storage directory. # deploy_dir: /tidb-deploy/grafana-3000 # # Server configs are used to specify the configuration of Alertmanager Servers. alertmanager_servers: # # The ip address of the Alertmanager Server. - host: 10.0.1.26 # # SSH port of the server. # ssh_port: 22 # # Alertmanager web service port. # web_port: 9093 # # Alertmanager communication port. # cluster_port: 9094 # # Alertmanager deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/alertmanager-9093" # # Alertmanager data storage directory. # data_dir: "/tidb-data/alertmanager-9093" # # Alertmanager log file storage directory. # log_dir: "/tidb-deploy/alertmanager-9093/log" tiup-1.16.3/embed/examples/cluster/topology.example.yaml000066400000000000000000000364471505422223000233410ustar00rootroot00000000000000# # Global variables are applied to all deployments and used as the default value of # # the deployments if a specific deployment value is missing. global: # # The user who runs the tidb cluster. user: "tidb" # # group is used to specify the group name the user belong to,if it's not the same as user. 
# group: "tidb" # # SSH port of servers in the managed cluster. # # systemd_mode is used to select whether to use sudo permissions. When its value is set to user, there is no need to add global.user to sudoers. The default value is system. # systemd_mode: "system" ssh_port: 22 # # Storage directory for cluster deployment files, startup scripts, and configuration files. deploy_dir: "/tidb-deploy" # # TiDB Cluster data storage directory data_dir: "/tidb-data" # # default listen_host for all components listen_host: 0.0.0.0 # # Supported values: "amd64", "arm64" (default: "amd64") arch: "amd64" # # Resource Control is used to limit the resource of an instance. # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html # # Supports using instance-level `resource_control` to override global `resource_control`. # resource_control: # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#MemoryLimit=bytes # memory_limit: "2G" # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#CPUQuota= # # The percentage specifies how much CPU time the unit shall get at maximum, relative to the total CPU time available on one CPU. Use values > 100% for allotting CPU time on more than one CPU. # # Example: CPUQuota=200% ensures that the executed processes will never get more than two CPU time. # cpu_quota: "200%" # # See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#IOReadBandwidthMax=device%20bytes # io_read_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" # io_write_bandwidth_max: "/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0 100M" # # Monitored variables are applied to all the machines. monitored: # # The communication port for reporting system information of each node in the TiDB cluster. node_exporter_port: 9100 # # Blackbox_exporter communication port, used for TiDB cluster port monitoring. 
blackbox_exporter_port: 9115 # # Storage directory for deployment files, startup scripts, and configuration files of monitoring components. # deploy_dir: "/tidb-deploy/monitored-9100" # # Data storage directory of monitoring components. # data_dir: "/tidb-data/monitored-9100" # # Log storage directory of the monitoring component. # log_dir: "/tidb-deploy/monitored-9100/log" # # Server configs are used to specify the runtime configuration of TiDB components. # # All configuration items can be found in TiDB docs: # # - TiDB: https://docs.pingcap.com/tidb/stable/tidb-configuration-file # # - TiKV: https://docs.pingcap.com/tidb/stable/tikv-configuration-file # # - PD: https://docs.pingcap.com/tidb/stable/pd-configuration-file # # - TiFlash: https://docs.pingcap.com/tidb/stable/tiflash-configuration # # # # All configuration items use points to represent the hierarchy, e.g: # # readpool.storage.use-unified-pool # # ^ ^ # # - example: https://github.com/pingcap/tiup/blob/master/embed/examples/cluster/topology.example.yaml # # You can overwrite this configuration via the instance-level `config` field. # server_configs: # tidb: # tikv: # pd: # tiflash: # tiflash-learner: # kvcdc: # # Server configs are used to specify the configuration of PD Servers. pd_servers: # # The ip address of the PD Server. - host: 10.0.1.11 # # SSH port of the server. # ssh_port: 22 # # PD Server name # name: "pd-1" # # communication port for TiDB Servers to connect. # client_port: 2379 # # communication port among PD Server nodes. # peer_port: 2380 # # PD Server deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/pd-2379" # # PD Server data storage directory. # data_dir: "/tidb-data/pd-2379" # # PD Server log file storage directory. # log_dir: "/tidb-deploy/pd-2379/log" # # numa node bindings. # numa_node: "0,1" # # The following configs are used to overwrite the `server_configs.pd` values. 
# config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 - host: 10.0.1.12 # ssh_port: 22 # name: "pd-1" # client_port: 2379 # peer_port: 2380 # deploy_dir: "/tidb-deploy/pd-2379" # data_dir: "/tidb-data/pd-2379" # log_dir: "/tidb-deploy/pd-2379/log" # numa_node: "0,1" # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 - host: 10.0.1.13 # ssh_port: 22 # name: "pd-1" # client_port: 2379 # peer_port: 2380 # deploy_dir: "/tidb-deploy/pd-2379" # data_dir: "/tidb-data/pd-2379" # log_dir: "/tidb-deploy/pd-2379/log" # numa_node: "0,1" # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 # # Server configs are used to specify the configuration of TiDB Servers. tidb_servers: # # The ip address of the TiDB Server. - host: 10.0.1.14 # # SSH port of the server. # ssh_port: 22 # # Access the TiDB cluster port. port: 4000 # # TiDB Server status information reporting port. status_port: 10080 # # TiDB Server deployment file, startup script, configuration file storage directory. deploy_dir: "/tidb-deploy/tidb-4000" # # TiDB Server log file storage directory. log_dir: "/tidb-deploy/tidb-4000/log" # numa_node: "0" # suggest numa node bindings. - host: 10.0.1.14 # ssh_port: 22 port: 4001 status_port: 10081 deploy_dir: "/tidb-deploy/tidb-4001" log_dir: "/tidb-deploy/tidb-4001/log" # numa_node: "1" # suggest numa node bindings. - host: 10.0.1.15 # ssh_port: 22 port: 4000 status_port: 10080 deploy_dir: "/tidb-deploy/tidb-4000" log_dir: "/tidb-deploy/tidb-4000/log" # numa_node: "0" # suggest numa node bindings. - host: 10.0.1.15 # ssh_port: 22 port: 4001 status_port: 10081 deploy_dir: "/tidb-deploy/tidb-4001" log_dir: "/tidb-deploy/tidb-4001/log" # numa_node: "1" # suggest numa node bindings. # # Server configs are used to specify the configuration of TiKV Servers. tikv_servers: # # The ip address of the TiKV Server. - host: 10.0.1.16 # # SSH port of the server. 
# ssh_port: 22 # # TiKV Server communication port. port: 20160 # # Communication port for reporting TiKV Server status. status_port: 20180 # # TiKV Server deployment file, startup script, configuration file storage directory. deploy_dir: "/data1/tidb-deploy/tikv-20160" # # TiKV Server data storage directory. data_dir: "/data1/tidb-data/tikv-20160" # # TiKV Server log file storage directory. log_dir: "/data1/tidb-deploy/tikv-20160/log" # numa_node: "0" # # The following configs are used to overwrite the `server_configs.tikv` values. # config: # log.level: warn - host: 10.0.1.16 # ssh_port: 22 port: 20161 status_port: 20181 deploy_dir: "/data2/tidb-deploy/tikv-20161" data_dir: "/data2/tidb-data/tikv-20161" log_dir: "/data2/tidb-deploy/tikv-20161/log" # numa_node: "1" # config: # log.level: warn - host: 10.0.1.17 # ssh_port: 22 port: 20160 status_port: 20180 deploy_dir: "/data1/tidb-deploy/tikv-20160" data_dir: "/data1/tidb-data/tikv-20160" log_dir: "/data1/tidb-deploy/tikv-20160/log" # numa_node: "0" # config: # log.level: warn - host: 10.0.1.17 # ssh_port: 22 port: 20161 status_port: 20181 deploy_dir: "/data2/tidb-deploy/tikv-20161" data_dir: "/data2/tidb-data/tikv-20161" log_dir: "/data2/tidb-deploy/tikv-20161/log" # numa_node: "1" # config: # log.level: warn - host: 10.0.1.18 # ssh_port: 22 port: 20160 status_port: 20180 deploy_dir: "/data1/tidb-deploy/tikv-20160" data_dir: "/data1/tidb-data/tikv-20160" log_dir: "/data1/tidb-deploy/tikv-20160/log" # numa_node: "0" # config: # log.level: warn - host: 10.0.1.18 # ssh_port: 22 port: 20161 status_port: 20181 deploy_dir: "/data2/tidb-deploy/tikv-20161" data_dir: "/data2/tidb-data/tikv-20161" log_dir: "/data2/tidb-deploy/tikv-20161/log" # numa_node: "1" # config: # log.level: warn # # Server configs are used to specify the configuration of TiFlash Servers. tiflash_servers: # # The ip address of the TiFlash Server. - host: 10.0.1.19 # # SSH port of the server. # ssh_port: 22 # # TiFlash TCP Service port. 
# # Since 7.1.0, it is not actually listened, and only being used as part of the instance identity. tcp_port: 9000 # # TiFlash raft service and coprocessor service listening address. flash_service_port: 3930 # # TiFlash Proxy service port. flash_proxy_port: 20170 # # Prometheus pulls TiFlash Proxy metrics port. flash_proxy_status_port: 20292 # # Prometheus pulls the TiFlash metrics port. metrics_port: 8234 # # TiFlash Server deployment file, startup script, configuration file storage directory. deploy_dir: /data1/tidb-deploy/tiflash-9000 ## With cluster version >= v4.0.9 and you want to deploy a multi-disk TiFlash node, it is recommended to ## check config.storage.* for details. The data_dir will be ignored if you defined those configurations. ## Setting data_dir to a ','-joined string is still supported but deprecated. ## Check https://docs.pingcap.com/tidb/stable/tiflash-configuration#multi-disk-deployment for more details. # # TiFlash Server data storage directory. data_dir: /data1/tidb-data/tiflash-9000 # # TiFlash Server log file storage directory. log_dir: /data1/tidb-deploy/tiflash-9000/log # # The following configs are used to overwrite the `server_configs.tiflash` values. # config: # logger.level: info # # The following configs are used to overwrite the `server_configs.tiflash-learner` values. 
# learner_config: # log.level: info - host: 10.0.1.19 # ssh_port: 22 tcp_port: 9001 flash_service_port: 3931 flash_proxy_port: 20171 flash_proxy_status_port: 20293 metrics_port: 8235 deploy_dir: /data2/tidb-deploy/tiflash-9001 data_dir: /data2/tidb-data/tiflash-9001 log_dir: /data2/tidb-deploy/tiflash-9001/log - host: 10.0.1.20 # ssh_port: 22 tcp_port: 9000 flash_service_port: 3930 flash_proxy_port: 20170 flash_proxy_status_port: 20292 metrics_port: 8234 deploy_dir: /data1/tidb-deploy/tiflash-9000 data_dir: /data1/tidb-data/tiflash-9000 log_dir: /data1/tidb-deploy/tiflash-9000/log - host: 10.0.1.20 # ssh_port: 22 tcp_port: 9001 flash_service_port: 3931 flash_proxy_port: 20171 flash_proxy_status_port: 20293 metrics_port: 8235 deploy_dir: /data2/tidb-deploy/tiflash-9001 data_dir: /data2/tidb-data/tiflash-9001 log_dir: /data2/tidb-deploy/tiflash-9001/log # # Server configs are used to specify the configuration of TiKV-CDC Servers. kvcdc_servers: - host: 10.0.1.20 # # SSH port of the server. # ssh_port: 22 # # TiKV-CDC Server communication port. port: 8600 # # TiKV-CDC Server data storage directory. data_dir: "/data1/tidb-data/tikv-cdc-8600" # # TiKV-CDC Server log file storage directory. log_dir: "/data1/tidb-deploy/tikv-cdc-8600/log" - host: 10.0.1.21 data_dir: "/data1/tidb-data/tikv-cdc-8600" log_dir: "/data1/tidb-deploy/tikv-cdc-8600/log" # # Server configs are used to specify the configuration of TiCDC Servers. #cdc_servers: # - host: 10.0.1.20 # # SSH port of the server. # ssh_port: 22 # # TiCDC Server communication port. # port: 8300 # # TiCDC Server data storage directory. # data_dir: "/data1/tidb-deploy/cdc-8300" # # TiCDC Server log file storage directory. # log_dir: "/data1/tidb-deploy/cdc-8300/log" # # TiCDC Server deployment file, startup script, configuration file storage directory. 
# deploy_dir: "/data1/tidb-deploy/cdc-8300" # gc-ttl: 86400 # 24h # tz: "System" # numa_node: "0,1" # config: # log.level: warn # ticdc_cluster_id: "default" # - host: 10.0.1.21 # ssh_port: 22 # port: 8301 # data_dir: "/data2/tidb-deploy/cdc-8301" # log_dir: "/data2/tidb-deploy/cdc-8301/log" # deploy_dir: "/data2/tidb-deploy/cdc-8301" # # Server configs are used to specify the configuration of TiDB Dashboard Servers. Available from v6.5.0 # tidb_dashboard_servers: # # The ip address of the PD Server. # - host: 10.0.1.11 # # SSH port of the server. # ssh_port: 22 # # port of TiDB Dashboard # port: 12333 # # TiDB Dashboard deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/tidb-dashboard-12333" # # PD Server data storage directory. # data_dir: "/tidb-data/tidb-dashboard-12333" # # PD Server log file storage directory. # log_dir: "/tidb-deploy/tidb-dashboard-12333/log" # # numa node bindings. # numa_node: "0,1" # # Server configs are used to specify the configuration of Prometheus Server. monitoring_servers: # # The ip address of the Monitoring Server. - host: 10.0.1.21 # # SSH port of the server. # ssh_port: 22 # # Prometheus Service communication port. # port: 9090 # # ng-monitoring servive communication port # ng_port: 12020 # # Prometheus deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/prometheus-8249" # # Prometheus data storage directory. # data_dir: "/tidb-data/prometheus-8249" # # Prometheus log file storage directory. # log_dir: "/tidb-deploy/prometheus-8249/log" # prometheus rule dir on TiUP machine # rule_dir: /home/tidb/prometheus_rule # scrape_interval: 15s # scrape_timeout: 10s # # Server configs are used to specify the configuration of Grafana Servers. grafana_servers: # # The ip address of the Grafana Server. 
- host: 10.0.1.21 # # Grafana Web monitoring service client (browser) access port # port: 3000 # # Grafana deployment file, startup script, configuration file storage directory. # deploy_dir: /tidb-deploy/grafana-3000 # grafana dashboard dir on TiUP machine # dashboard_dir: /home/tidb/dashboards # config: # log.file.level: warning # # Server configs are used to specify the configuration of Alertmanager Servers. alertmanager_servers: # # The ip address of the Alertmanager Server. - host: 10.0.1.21 # # SSH port of the server. # ssh_port: 22 # Alertmanager web service listen host. # listen_host: 0.0.0.0 # # Alertmanager web service port. # web_port: 9093 # # Alertmanager communication port. # cluster_port: 9094 # # Alertmanager deployment file, startup script, configuration file storage directory. # deploy_dir: "/tidb-deploy/alertmanager-9093" # # Alertmanager data storage directory. # data_dir: "/tidb-data/alertmanager-9093" # # Alertmanager log file storage directory. # log_dir: "/tidb-deploy/alertmanager-9093/log" # # Alertmanager config file storage directory. # config_file: "/tidb-deploy/alertmanager-9093/bin/alertmanager/alertmanager.yml" tiup-1.16.3/embed/examples/dm/000077500000000000000000000000001505422223000160505ustar00rootroot00000000000000tiup-1.16.3/embed/examples/dm/local.tpl000066400000000000000000000031571505422223000176710ustar00rootroot00000000000000# The topology template is used deploy a minimal DM cluster, which suitable # for scenarios with only three machinescontains. The minimal cluster contains # - 3 master nodes # - 3 worker nodes # You can change the hosts according your environment --- global: # # The OS user who runs the tidb cluster. user: "{{ .GlobalUser }}" {{- if .GlobalGroup }} # group is used to specify the group name the user belong to if it's not the same as user. group: "{{ .GlobalGroup }}" {{- end }} {{- if .GlobalSystemdMode }} # # systemd_mode is used to select whether to use sudo permissions. 
systemd_mode: "{{ .GlobalSystemdMode }}" {{- end }} # # SSH port of servers in the managed cluster. ssh_port: {{ .GlobalSSHPort }} # # Storage directory for cluster deployment files, startup scripts, and configuration files. deploy_dir: "{{ .GlobalDeployDir }}" # # TiDB Cluster data storage directory data_dir: "{{ .GlobalDataDir }}" {{- if .GlobalArch }} # # Supported values: "amd64", "arm64" (default: "amd64") arch: "{{ .GlobalArch }}" {{- end }} {{ if .MasterServers -}} master_servers: {{- range .MasterServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .WorkerServers -}} worker_servers: {{- range .WorkerServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .MonitoringServers -}} monitoring_servers: {{- range .MonitoringServers }} - host: {{ . }} {{- end }} {{ end }} {{ if .GrafanaServers -}} grafana_servers: {{- range .GrafanaServers }} - host: {{ . }} {{- end }} {{ end }} {{- if .AlertManagerServers }} alertmanager_servers: {{- range .AlertManagerServers }} - host: {{ . }} {{- end }} {{ end }} tiup-1.16.3/embed/examples/dm/minimal.yaml000066400000000000000000000013031505422223000203570ustar00rootroot00000000000000# The topology template is used deploy a minimal DM cluster, which suitable # for scenarios with only three machinescontains. 
The minimal cluster contains # - 3 master nodes # - 3 worker nodes # You can change the hosts according your environment --- global: user: "tidb" # systemd_mode: "system" ssh_port: 22 deploy_dir: "/home/tidb/dm/deploy" data_dir: "/home/tidb/dm/data" # arch: "amd64" master_servers: - host: 172.19.0.101 - host: 172.19.0.102 - host: 172.19.0.103 worker_servers: - host: 172.19.0.101 - host: 172.19.0.102 - host: 172.19.0.103 monitoring_servers: - host: 172.19.0.101 grafana_servers: - host: 172.19.0.101 alertmanager_servers: - host: 172.19.0.101 tiup-1.16.3/embed/examples/dm/topology.example.yaml000066400000000000000000000045301505422223000222440ustar00rootroot00000000000000--- # Global variables are applied to all deployments and as the default value of # them if the specific deployment value missing. global: user: "tidb" # systemd_mode: "system" ssh_port: 22 deploy_dir: "/dm-deploy" data_dir: "/dm-data" server_configs: master: log-level: info # rpc-timeout: "30s" # rpc-rate-limit: 10.0 # rpc-rate-burst: 40 worker: log-level: info master_servers: - host: 10.0.1.11 name: master1 # ssh_port: 22 # port: 8261 # peer_port: 8291 # deploy_dir: "/dm-deploy/dm-master-8261" # data_dir: "/dm-data/dm-master-8261" # log_dir: "/dm-deploy/dm-master-8261/log" # numa_node: "0,1" # # The following configs are used to overwrite the `server_configs.master` values. 
config: log-level: info # rpc-timeout: "30s" # rpc-rate-limit: 10.0 # rpc-rate-burst: 40 - host: 10.0.1.18 name: master2 - host: 10.0.1.19 name: master3 worker_servers: - host: 10.0.1.12 # ssh_port: 22 # port: 8262 # deploy_dir: "/dm-deploy/dm-worker-8262" # log_dir: "/dm-deploy/dm-worker-8262/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.dm-worker` values config: log-level: info # keepalive-ttl: 60 # relay-keepalive-ttl: 1800 # since v2.0.2 # relay-dir: "" # since v5.4.0 - host: 10.0.1.19 monitoring_servers: - host: 10.0.1.13 # ssh_port: 22 # port: 9090 # deploy_dir: "/tidb-deploy/prometheus-8249" # data_dir: "/tidb-data/prometheus-8249" # log_dir: "/tidb-deploy/prometheus-8249/log" # prometheus rule dir on TiUP machine # rule_dir: /home/tidb/prometheus_rule grafana_servers: - host: 10.0.1.14 # port: 3000 # deploy_dir: /tidb-deploy/grafana-3000 # grafana dashboard dir on TiUP machine # dashboard_dir: /home/tidb/dashboards alertmanager_servers: - host: 10.0.1.15 # ssh_port: 22 # web_port: 9093 # cluster_port: 9094 # deploy_dir: "/tidb-deploy/alertmanager-9093" # data_dir: "/tidb-data/alertmanager-9093" # log_dir: "/tidb-deploy/alertmanager-9093/log" # if monitored is set, node_exporter and blackbox_exporter will be # deployed with the port specified, otherwise they are not deployed # on the server to avoid conflict with tidb clusters #monitored: # node_exporter_port: 9100 # blackbox_exporter_port: 9115 tiup-1.16.3/embed/templates/000077500000000000000000000000001505422223000156305ustar00rootroot00000000000000tiup-1.16.3/embed/templates/config/000077500000000000000000000000001505422223000170755ustar00rootroot00000000000000tiup-1.16.3/embed/templates/config/alertmanager.yml000066400000000000000000000040611505422223000222630ustar00rootroot00000000000000global: # The smarthost and SMTP sender used for mail notifications. 
smtp_smarthost: "localhost:25" smtp_from: "alertmanager@example.org" smtp_auth_username: "alertmanager" smtp_auth_password: "password" # smtp_require_tls: true # The Slack webhook URL. # slack_api_url: '' route: # A default receiver receiver: "blackhole" # The labels by which incoming alerts are grouped together. For example, # multiple alerts coming in for cluster=A and alertname=LatencyHigh would # be batched into a single group. group_by: ["env", "instance", "alertname", "type", "group", "job"] # When a new group of alerts is created by an incoming alert, wait at # least 'group_wait' to send the initial notification. # This way ensures that you get multiple alerts for the same group that start # firing shortly after another are batched together on the first # notification. group_wait: 30s # When the first notification was sent, wait 'group_interval' to send a batch # of new alerts that started firing for that group. group_interval: 3m # If an alert has successfully been sent, wait 'repeat_interval' to # resend them. 
repeat_interval: 3m routes: # - match: # receiver: webhook-kafka-adapter # continue: true # - match: # env: test-cluster # receiver: db-alert-slack # - match: # env: test-cluster # receiver: db-alert-email receivers: # - name: 'webhook-kafka-adapter' # webhook_configs: # - send_resolved: true # url: 'http://10.0.3.6:28082/v1/alertmanager' #- name: 'db-alert-slack' # slack_configs: # - channel: '#alerts' # username: 'db-alert' # icon_emoji: ':bell:' # title: '{{ .CommonLabels.alertname }}' # text: '{{ .CommonAnnotations.summary }} {{ .CommonAnnotations.description }} expr: {{ .CommonLabels.expr }} http://172.0.0.1:9093/#/alerts' # - name: "db-alert-email" # email_configs: # - send_resolved: true # to: "example@example.com" # This doesn't alert anything, please configure your own receiver - name: "blackhole" tiup-1.16.3/embed/templates/config/blackbox.yml.tpl000066400000000000000000000020131505422223000221770ustar00rootroot00000000000000modules: http_2xx: prober: http http: method: GET http_post_2xx: prober: http http: method: POST tcp_connect: prober: tcp {{- if .TLSEnabled}} tls_connect: prober: tcp tcp: tls: true tls_config: insecure_skip_verify: false ca_file: {{.DeployDir}}/tls/ca.crt cert_file: {{.DeployDir}}/tls/blackbox_exporter.crt key_file: {{.DeployDir}}/tls/blackbox_exporter.pem {{- end}} pop3s_banner: prober: tcp tcp: query_response: - expect: '^+OK' tls: true tls_config: insecure_skip_verify: false ssh_banner: prober: tcp tcp: query_response: - expect: '^SSH-2.0-' irc_banner: prober: tcp tcp: query_response: - send: 'NICK prober' - send: 'USER prober prober prober :prober' - expect: 'PING :([^ ]+)' send: 'PONG ${1}' - expect: '^:[^ ]+ 001' icmp: prober: icmp timeout: 5s icmp: preferred_ip_protocol: 'ip4' tiup-1.16.3/embed/templates/config/dashboard.yml.tpl000066400000000000000000000003361505422223000223470ustar00rootroot00000000000000apiVersion: 1 providers: - name: {{.ClusterName}} folder: {{.ClusterName}} type: file disableDeletion: false editable: 
true updateIntervalSeconds: 30 options: path: {{.DeployDir}}/dashboardstiup-1.16.3/embed/templates/config/datasource.yml.tpl000066400000000000000000000004241505422223000225500ustar00rootroot00000000000000apiVersion: 1 datasources: {{- range .Datasources}} - name: {{.Name}} type: {{.Type}} access: proxy url: {{.URL}} withCredentials: false isDefault: {{.IsDefault}} tlsAuth: false tlsAuthWithCACert: false version: 1 editable: true {{- end}}tiup-1.16.3/embed/templates/config/grafana.ini.tpl000066400000000000000000000207621505422223000220020ustar00rootroot00000000000000##################### Grafana Configuration Example ##################### # # Everything has defaults so you only need to uncomment things you want to # change # possible values : production, development ; app_mode = production # instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty ; instance_name = ${HOSTNAME} #################################### Paths #################################### [paths] # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) # data = {{.DeployDir}}/data # # Directory where grafana can store logs # logs = {{.DeployDir}}/log # # Directory where grafana will automatically scan and look for plugins # plugins = {{.DeployDir}}/plugins # # folder that contains provisioning config files that grafana will apply on startup and while running. 
provisioning = {{.DeployDir}}/provisioning # #################################### Server #################################### [server] # Protocol (http or https) ;protocol = http # The ip address to bind to, empty will bind to all interfaces ;http_addr = # The http port to use http_port = {{.Port}} # The public facing domain name used to access grafana from a browser {{- if .Domain}} domain = {{.Domain}} {{- else}} domain = {{.IP}} {{- end}} # Redirect to correct domain if host header does not match domain # Prevents DNS rebinding attacks ;enforce_domain = false # The full public facing url {{- if .RootURL}} root_url = {{.RootURL}} server_from_sub_path = true {{- end}} # Log web requests ;router_logging = false # the path relative working path ;static_root_path = public # enable gzip ;enable_gzip = false # https certs & key file ;cert_file = ;cert_key = #################################### Database #################################### [database] # Either "mysql", "postgres" or "sqlite3", it's your choice ;type = sqlite3 ;host = 127.0.0.1:3306 ;name = grafana ;user = root ;password = # For "postgres" only, either "disable", "require" or "verify-full" ;ssl_mode = disable # For "sqlite3" only, path relative to data_path setting ;path = grafana.db #################################### Session #################################### [session] # Either "memory", "file", "redis", "mysql", "postgres", default is "file" ;provider = file # Provider config options # memory: not have any config yet # file: session dir path, is relative to grafana data_path # redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana` # mysql: go-sql-driver/mysql dsn config string, e.g. 
`user:password@tcp(127.0.0.1:3306)/database_name` # postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable ;provider_config = sessions # Session cookie name ;cookie_name = grafana_sess # If you use session in https only, default is false ;cookie_secure = false # Session life time, default is 86400 ;session_life_time = 86400 #################################### Analytics #################################### [analytics] # Server reporting, sends usage counters to stats.grafana.org every 24 hours. # No ip addresses are being tracked, only simple counters to track # running instances, dashboard and error counts. It is very helpful to us. # Change this option to false to disable reporting. ;reporting_enabled = true # Set to false to disable all checks to https://grafana.net # for new versions (grafana itself and plugins), check is used # in some UI views to notify that grafana or plugin update exists # This option does not cause any auto updates, nor send any information # only a GET request to http://grafana.net to get latest versions check_for_updates = true # Google Analytics universal tracking code, only enabled if you specify an id here ;google_analytics_ua_id = #################################### Security #################################### [security] # default admin user, created on startup admin_user = {{.Username}} # default admin password, can be changed before first start of grafana, or in profile settings admin_password = `{{.Password}}` # used for signing ;secret_key = SW2YcwTIb9zpOOhoPsMm # Auto-login remember days ;login_remember_days = 7 ;cookie_username = grafana_user ;cookie_remember_name = grafana_remember # disable gravatar profile images ;disable_gravatar = false # data source proxy whitelist (ip_or_domain:port separated by spaces) ;data_source_proxy_whitelist = [snapshots] # snapshot sharing options ;external_enabled = true ;external_snapshot_url = https://snapshots-origin.raintank.io ;external_snapshot_name = Publish to 
snapshot.raintank.io #################################### Users #################################### [users] # disable user signup / registration ;allow_sign_up = true # Allow non admin users to create organizations ;allow_org_create = true # Set to true to automatically assign new users to the default organization (id 1) ;auto_assign_org = true # Default role new users will be automatically assigned (if disabled above is set to true) ;auto_assign_org_role = Viewer # Background text for the user field on the login page ;login_hint = email or username # Default UI theme ("dark" or "light") {{- if .DefaultTheme}} default_theme = {{.DefaultTheme}} {{- else}} ;default_theme = dark {{- end}} ############### Set Cookie Name for Multiple Instances ####################### [auth] login_cookie_name = grafana_session_{{ if .Domain }}{{.Domain}}_{{ end }}{{.Port}} #################################### Anonymous Auth ########################## [auth.anonymous] {{- if .AnonymousEnable}} enabled = true {{- end}} # specify organization name that should be used for unauthenticated users {{- if .OrgName}} org_name = {{.OrgName}} {{- else}} ;org_name = Main Org. {{- end}} # specify role for unauthenticated users {{- if .OrgRole}} org_role = {{.OrgRole}} {{- else}} ;org_role = Viewer {{- end}} #################################### Basic Auth ########################## [auth.basic] ;enabled = true #################################### Auth LDAP ########################## [auth.ldap] ;enabled = false ;config_file = /etc/grafana/ldap.toml #################################### SMTP / Emailing ########################## [smtp] ;enabled = false ;host = localhost:25 ;user = ;password = ;cert_file = ;key_file = ;skip_verify = false ;from_address = admin@grafana.localhost [emails] ;welcome_email_on_sign_up = false #################################### Logging ########################## [log] # Either "console", "file", "syslog". 
Default is console and file # Use space to separate multiple modes, e.g. "console file" mode = file # Either "trace", "debug", "info", "warn", "error", "critical", default is "info" ;level = info # For "console" mode only [log.console] ;level = # log line format, valid options are text, console and json ;format = console # For "file" mode only [log.file] level = info # log line format, valid options are text, console and json format = text # This enables automated log rotate(switch of following options), default is true ;log_rotate = true # Max line number of single file, default is 1000000 ;max_lines = 1000000 # Max size shift of single file, default is 28 means 1 << 28, 256MB ;max_size_shift = 28 # Segment log daily, default is true ;daily_rotate = true # Expired days of log file(delete after max days), default is 7 ;max_days = 7 [log.syslog] ;level = # log line format, valid options are text, console and json ;format = text # Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. ;network = ;address = # Syslog facility. user, daemon and local0 through local7 are valid. ;facility = # Syslog tag. By default, the process' argv[0] is used. ;tag = #################################### AMQP Event Publisher ########################## [event_publisher] ;enabled = false ;rabbitmq_url = amqp://localhost/ ;exchange = grafana_events ;#################################### Dashboard JSON files ########################## [dashboards.json] enabled = false path = {{.DeployDir}}/dashboards #################################### Internal Grafana Metrics ########################## # Metrics available at HTTP API Url /api/metrics [metrics] # Disable / Enable internal metrics ;enabled = true # Publish interval ;interval_seconds = 10 # Send internal metrics to Graphite ; [metrics.graphite] ; address = localhost:2003 ; prefix = prod.grafana.%(instance_name)s. 
#################################### Internal Grafana Metrics ########################## # Url used to to import dashboards directly from Grafana.net [grafana_net] url = https://grafana.net tiup-1.16.3/embed/templates/config/ngmonitoring.toml.tpl000066400000000000000000000011131505422223000232760ustar00rootroot00000000000000# NG Monitoring Server Configuration. # Server address. address = "{{.Address}}" advertise-address = "{{.AdvertiseAddress}}" [log] # Log path path = "{{.LogDir}}" # Log level: INFO, WARN, ERROR level = "INFO" [pd] # Addresses of PD instances within the TiDB cluster. Multiple addresses are separated by commas, e.g. "10.0.0.1:2379","10.0.0.2:2379" endpoints = [{{.PDAddrs}}] {{- if .TLSEnabled}} [security] ca-path = "{{.DeployDir}}/tls/ca.crt" cert-path = "{{.DeployDir}}/tls/prometheus.crt" key-path = "{{.DeployDir}}/tls/prometheus.pem" {{- end}} [storage] path = "{{.DataDir}}"tiup-1.16.3/embed/templates/config/prometheus.yml.tpl000066400000000000000000000264641505422223000226250ustar00rootroot00000000000000--- global: {{- if .ScrapeInterval}} scrape_interval: {{.ScrapeInterval}} {{- else}} scrape_interval: 15s # By default, scrape targets every 15 seconds. {{- end}} evaluation_interval: 15s # By default, scrape targets every 15 seconds. {{- if .ScrapeTimeout}} scrape_timeout: {{.ScrapeTimeout}} {{- end}} external_labels: cluster: '{{.ClusterName}}' monitor: "prometheus" # Load and evaluate rules in this file every 'evaluation_interval' seconds. 
rule_files: {{- if .LocalRules}} {{- range .LocalRules}} - '{{.}}' {{- end}} {{- else}} {{- if and .MonitoredServers .PDAddrs}} - 'node.rules.yml' - 'blacker.rules.yml' - 'bypass.rules.yml' {{- end}} {{- if .PDAddrs}} - 'pd.rules.yml' {{- end}} {{- if .TiDBStatusAddrs}} - 'tidb.rules.yml' {{- end}} {{- if .TiKVStatusAddrs}} - 'tikv.rules.yml' {{- if .HasTiKVAccelerateRules}} - 'tikv.accelerate.rules.yml' {{- end}} {{- end}} {{- if .TiFlashStatusAddrs}} - 'tiflash.rules.yml' {{- end}} {{- if .PumpAddrs}} - 'binlog.rules.yml' {{- end}} {{- if .CDCAddrs}} - 'ticdc.rules.yml' {{- end}} {{- if .LightningAddrs}} - 'lightning.rules.yml' {{- end}} {{- if .DMWorkerAddrs}} - 'dm_worker.rules.yml' {{- end}} {{- end}} {{- if .AlertmanagerAddrs}} alerting: alertmanagers: - static_configs: - targets: {{- range .AlertmanagerAddrs}} - '{{.}}' {{- end}} {{- end}} scrape_configs: {{- if .PushgatewayAddrs}} - job_name: 'overwritten-cluster' scrape_interval: 15s honor_labels: true # don't overwrite job & instance labels static_configs: - targets: {{- range .PushgatewayAddrs}} - '{{.}}' {{- end}} {{- end}} {{- if .LightningAddrs}} - job_name: "lightning" {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: ['{{index .LightningAddrs 0}}'] {{- end}} - job_name: "overwritten-nodes" honor_labels: true # don't overwrite job & instance labels static_configs: - targets: {{- range .NodeExporterAddrs}} - '{{.}}' {{- end}} - job_name: "tidb" honor_labels: true # don't overwrite job & instance labels metric_relabel_configs: - action: drop regex: tidb_tikvclient_source_request_seconds_count|tidb_tikvclient_batch_requests_sum|tidb_tikvclient_batch_requests_count|tidb_tikvclient_batch_pending_requests_sum|tidb_tikvclient_batch_pending_requests_count source_labels: - __name__ {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false 
ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .TiDBStatusAddrs}} - '{{.}}' {{- end}} - job_name: "tiproxy" honor_labels: true # don't overwrite job & instance labels metrics_path: /api/metrics {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .TiProxyStatusAddrs}} - '{{.}}' {{- end}} - job_name: "tikv" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .TiKVStatusAddrs}} - '{{.}}' {{- end}} - job_name: "pd" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .PDAddrs}} - '{{.}}' {{- end}} - job_name: "tso" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .TSOAddrs}} - '{{.}}' {{- end}} - job_name: "scheduling" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .SchedulingAddrs}} - '{{.}}' {{- end}} {{- if .TiFlashStatusAddrs}} - job_name: "tiflash" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: 
insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .TiFlashStatusAddrs}} - '{{.}}' {{- end}} {{- range .TiFlashLearnerStatusAddrs}} - '{{.}}' {{- end}} {{- end}} {{- if .PumpAddrs}} - job_name: 'pump' honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .PumpAddrs}} - '{{.}}' {{- end}} - job_name: 'drainer' honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .DrainerAddrs}} - '{{.}}' {{- end}} - job_name: "port_probe" scrape_interval: 30s metrics_path: /probe params: {{- if .TLSEnabled}} module: [tls_connect] {{- else}} module: [tcp_connect] {{- end}} static_configs: - targets: {{- range .PumpAddrs}} - '{{.}}' {{- end}} labels: group: 'pump' - targets: {{- range .DrainerAddrs}} - '{{.}}' {{- end}} labels: group: 'drainer' relabel_configs: - source_labels: [__address__] target_label: __param_target - source_labels: [__param_target] target_label: instance - target_label: __address__ replacement: '{{.BlackboxAddr}}' {{- end}} {{- if .CDCAddrs}} - job_name: "ticdc" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .CDCAddrs}} - '{{.}}' {{- end}} {{- end}} {{- if .TiKVCDCAddrs}} - job_name: "tikv-cdc" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: 
insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .TiKVCDCAddrs}} - '{{.}}' {{- end}} {{- end}} {{- if .NGMonitoringAddrs}} - job_name: "ng-monitoring" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .NGMonitoringAddrs}} - '{{.}}' {{- end}} {{- end}} - job_name: "tidb_port_probe" scrape_interval: 30s metrics_path: /probe params: {{- if .TLSEnabled}} module: [tls_connect] {{- else}} module: [tcp_connect] {{- end}} static_configs: - targets: {{- range .TiDBStatusAddrs}} - '{{.}}' {{- end}} labels: group: 'tidb' - targets: {{- range .TiKVStatusAddrs}} - '{{.}}' {{- end}} labels: group: 'tikv' - targets: {{- range .PDAddrs}} - '{{.}}' {{- end}} labels: group: 'pd' {{- if .TiFlashStatusAddrs}} - targets: {{- range .TiFlashStatusAddrs}} - '{{.}}' {{- end}} labels: group: 'tiflash' {{- end}} {{- if .CDCAddrs}} - targets: {{- range .CDCAddrs}} - '{{.}}' {{- end}} labels: group: 'ticdc' {{- end}} relabel_configs: - source_labels: [__address__] target_label: __param_target - source_labels: [__param_target] target_label: instance - target_label: __address__ replacement: '{{.BlackboxAddr}}' - job_name: "monitor_port_probe" scrape_interval: 30s metrics_path: /probe params: module: [tcp_connect] static_configs: {{- if .PushgatewayAddrs}} - targets: {{- range .PushgatewayAddrs}} - '{{.}}' {{- end}} labels: group: 'pushgateway' {{- end}} {{- if .GrafanaAddr}} - targets: - '{{.GrafanaAddr}}' labels: group: 'grafana' {{- end}} - targets: {{- range .NodeExporterAddrs}} - '{{.}}' {{- end}} labels: group: 'node_exporter' - targets: {{- range .BlackboxExporterAddrs}} - '{{.}}' {{- end}} labels: group: 'blackbox_exporter' relabel_configs: - source_labels: 
[__address__] target_label: __param_target - source_labels: [__param_target] target_label: instance {{- if .BlackboxAddr}} - target_label: __address__ replacement: '{{.BlackboxAddr}}' {{- end}} {{- range $addr := .BlackboxExporterAddrs}} - job_name: "blackbox_exporter_{{$addr}}_icmp" scrape_interval: 6s metrics_path: /probe params: module: [icmp] static_configs: - targets: {{- range $.MonitoredServers}} - '{{.}}' {{- end}} relabel_configs: - source_labels: [__address__] regex: (.*)(:80)? target_label: __param_target replacement: ${1} - source_labels: [__param_target] regex: (.*) target_label: ping replacement: ${1} - source_labels: [] regex: .* target_label: __address__ replacement: '{{$addr}}' {{- end}} {{- if .DMMasterAddrs}} - job_name: "dm_master" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .DMMasterAddrs}} - '{{.}}' {{- end}} {{- end}} {{- if .DMWorkerAddrs}} - job_name: "dm_worker" honor_labels: true # don't overwrite job & instance labels {{- if .TLSEnabled}} scheme: https tls_config: insecure_skip_verify: false ca_file: ../tls/ca.crt cert_file: ../tls/prometheus.crt key_file: ../tls/prometheus.pem {{- end}} static_configs: - targets: {{- range .DMWorkerAddrs}} - '{{.}}' {{- end}} {{- end}} {{- if .RemoteConfig}} {{.RemoteConfig}} {{- end}}tiup-1.16.3/embed/templates/config/spark-defaults.conf.tpl000066400000000000000000000027241505422223000234740ustar00rootroot00000000000000# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Default system properties included when running spark-submit. # This is useful for setting default environmental settings. # Example: #spark.eventLog.dir: "hdfs://namenode:8021/directory" # spark.executor.extraJavaOptions -XX:+PrintGCDetails -Dkey=value -Dnumbers="one two three" {{- define "PDList"}} {{- range $idx, $pd := .}} {{- if eq $idx 0}} {{- $pd}} {{- else -}} ,{{$pd}} {{- end}} {{- end}} {{- end}} {{ range $k, $v := .CustomFields}} {{ $k }} {{ $v }} {{- end }} spark.sql.extensions org.apache.spark.sql.TiExtensions {{- if .TiSparkMasters}} spark.master spark://{{.TiSparkMasters}} {{- end}} spark.tispark.pd.addresses {{template "PDList" .Endpoints}} tiup-1.16.3/embed/templates/config/spark-log4j.properties.tpl000066400000000000000000000041541505422223000241520ustar00rootroot00000000000000# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. 
You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Set everything to be logged to the console log4j.rootCategory=INFO, console log4j.appender.console=org.apache.log4j.ConsoleAppender log4j.appender.console.target=System.err log4j.appender.console.layout=org.apache.log4j.PatternLayout log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n # Set the default spark-shell log level to WARN. When running the spark-shell, the # log level for this class is used to overwrite the root logger's log level, so that # the user can have different defaults for the shell and regular Spark apps. log4j.logger.org.apache.spark.repl.Main=WARN # Settings to quiet third party logs that are too verbose log4j.logger.org.spark_project.jetty=WARN log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO log4j.logger.org.apache.parquet=ERROR log4j.logger.parquet=ERROR # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR # tispark disable "WARN ObjectStore:568 - Failed to get database" log4j.logger.org.apache.hadoop.hive.metastore.ObjectStore=ERROR 
tiup-1.16.3/embed/templates/scripts/000077500000000000000000000000001505422223000173175ustar00rootroot00000000000000tiup-1.16.3/embed/templates/scripts/run_alertmanager.sh.tpl000066400000000000000000000015761505422223000240100ustar00rootroot00000000000000#!/bin/bash set -e DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! exec > >(tee -i -a "{{.LogDir}}/alertmanager.log") exec 2>&1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/alertmanager/alertmanager \ {{- else}} exec bin/alertmanager/alertmanager \ {{- end}} --config.file="conf/alertmanager.yml" \ --storage.path="{{.DataDir}}" \ --data.retention=120h \ --log.level="info" \ --web.listen-address="{{.WebListenAddr}}" \ --web.external-url="{{.WebExternalURL}}" \ {{- if .ClusterPeers}} {{- range $idx, $am := .ClusterPeers}} --cluster.peer="{{$am}}" \ {{- end}} {{- end}} {{- if .AdditionalArgs}} {{- range .AdditionalArgs}} {{.}} \ {{- end}} {{- end}} --cluster.listen-address="{{.ClusterListenAddr}}" tiup-1.16.3/embed/templates/scripts/run_blackbox_exporter.sh.tpl000066400000000000000000000011471505422223000250550ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 exec > >(tee -i -a "{{.LogDir}}/blackbox_exporter.log") exec 2>&1 EXPORTER_BIN=bin/blackbox_exporter/blackbox_exporter if [ ! 
-f $EXPORTER_BIN ]; then EXPORTER_BIN=bin/blackbox_exporter fi {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} $EXPORTER_BIN \ {{- else}} exec $EXPORTER_BIN \ {{- end}} --web.listen-address=":{{.Port}}" \ --log.level="info" \ --config.file="conf/blackbox.yml" tiup-1.16.3/embed/templates/scripts/run_cdc.sh.tpl000066400000000000000000000017051505422223000220710ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/cdc server \ {{- else}} exec bin/cdc server \ {{- end}} --addr "{{.Addr}}" \ --advertise-addr "{{.AdvertiseAddr}}" \ --pd "{{.PD}}" \ {{- if .DataDir}} {{- if .DataDirEnabled}} --data-dir="{{.DataDir}}" \ {{- else}} --sort-dir="{{.DataDir}}/tmp/sorter" \ {{- end}} {{- end}} {{- if .TLSEnabled}} --ca tls/ca.crt \ --cert tls/cdc.crt \ --key tls/cdc.pem \ {{- end}} {{- if .GCTTL}} --gc-ttl {{.GCTTL}} \ {{- end}} {{- if .TZ}} --tz "{{.TZ}}" \ {{- end}} {{- if .ClusterID}} --cluster-id {{.ClusterID}} \ {{- end}} {{- if .ConfigFileEnabled}} --config conf/cdc.toml \ {{- end}} --log-file "{{.LogDir}}/cdc.log" 2>> "{{.LogDir}}/cdc_stderr.log" tiup-1.16.3/embed/templates/scripts/run_dm-master.sh.tpl000066400000000000000000000015111505422223000232240ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/dm-master/dm-master \ {{- else}} exec bin/dm-master/dm-master \ {{- end}} {{- if .V1SourcePath}} --v1-sources-path="{{.V1SourcePath}}" \ {{- end}} --name="{{.Name}}" \ --master-addr="{{.MasterAddr}}" \ --advertise-addr="{{.AdvertiseAddr}}" \ --peer-urls="{{.PeerURL}}" \ --advertise-peer-urls="{{.AdvertisePeerURL}}" \ --log-file="{{.LogDir}}/dm-master.log" \ --data-dir="{{.DataDir}}" \ --initial-cluster="{{.InitialCluster}}" \ --config=conf/dm-master.toml >> "{{.LogDir}}/dm-master_stdout.log" 2>> "{{.LogDir}}/dm-master_stderr.log" tiup-1.16.3/embed/templates/scripts/run_dm-master_scale.sh.tpl000066400000000000000000000013471505422223000244020ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/dm-master/dm-master \ {{- else}} exec bin/dm-master/dm-master \ {{- end}} --name="{{.Name}}" \ --master-addr="{{.MasterAddr}}" \ --advertise-addr="{{.AdvertiseAddr}}" \ --peer-urls="{{.PeerURL}}" \ --advertise-peer-urls="{{.AdvertisePeerURL}}" \ --log-file="{{.LogDir}}/dm-master.log" \ --data-dir="{{.DataDir}}" \ --join="{{.Join}}" \ --config=conf/dm-master.toml >> "{{.LogDir}}/dm-master_stdout.log" 2>> "{{.LogDir}}/dm-master_stderr.log" tiup-1.16.3/embed/templates/scripts/run_dm-worker.sh.tpl000066400000000000000000000011631505422223000232450ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/dm-worker/dm-worker \ {{- else}} exec bin/dm-worker/dm-worker \ {{- end}} --name="{{.Name}}" \ --worker-addr="{{.WorkerAddr}}" \ --advertise-addr="{{.AdvertiseAddr}}" \ --log-file="{{.LogDir}}/dm-worker.log" \ --join="{{.Join}}" \ --config=conf/dm-worker.toml >> "{{.LogDir}}/dm-worker_stdout.log" 2>> "{{.LogDir}}/dm-worker_stderr.log" tiup-1.16.3/embed/templates/scripts/run_drainer.sh.tpl000066400000000000000000000010671505422223000227650ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/drainer \ {{- else}} exec bin/drainer \ {{- end}} {{- if .NodeID}} --node-id="{{.NodeID}}" \ {{- end}} --addr="{{.Addr}}" \ --pd-urls="{{.PD}}" \ --data-dir="{{.DataDir}}" \ --log-file="{{.LogDir}}/drainer.log" \ --config=conf/drainer.toml 2>> "{{.LogDir}}/drainer_stderr.log" tiup-1.16.3/embed/templates/scripts/run_grafana.sh.tpl000066400000000000000000000004671505422223000227430ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 LANG=en_US.UTF-8 \ exec bin/bin/grafana-server \ --homepath="{{.DeployDir}}/bin" \ --config="{{.DeployDir}}/conf/grafana.ini" tiup-1.16.3/embed/templates/scripts/run_node_exporter.sh.tpl000066400000000000000000000014001505422223000242050ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 exec > >(tee -i -a "{{.LogDir}}/node_exporter.log") exec 2>&1 EXPORTER_BIN=bin/node_exporter/node_exporter if [ ! 
-f $EXPORTER_BIN ]; then EXPORTER_BIN=bin/node_exporter fi {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} $EXPORTER_BIN \ {{- else}} exec $EXPORTER_BIN \ {{- end}} --web.listen-address=":{{.Port}}" \ --collector.tcpstat \ --collector.mountstats \ --collector.meminfo_numa \ --collector.buddyinfo \ --collector.vmstat.fields="^.*" \ --log.level="info" #--collector.systemd \ #--collector.interrupts \ tiup-1.16.3/embed/templates/scripts/run_pd.sh.tpl000066400000000000000000000013731505422223000217440ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 exec \ {{- if .NumaNode}} numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} \ {{- end}} env GODEBUG=madvdontneed=1 \ {{- if .MSMode}} PD_SERVICE_MODE=api \ {{- end}} bin/pd-server \ --name="{{.Name}}" \ --client-urls="{{.ClientURL}}" \ --advertise-client-urls="{{.AdvertiseClientURL}}" \ --peer-urls="{{.PeerURL}}" \ --advertise-peer-urls="{{.AdvertisePeerURL}}" \ --data-dir="{{.DataDir}}" \ --initial-cluster="{{.InitialCluster}}" \ --config=conf/pd.toml \ --log-file="{{.LogDir}}/pd.log" 2>> "{{.LogDir}}/pd_stderr.log" tiup-1.16.3/embed/templates/scripts/run_pd_scale.sh.tpl000066400000000000000000000013511505422223000231070ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 exec \ {{- if .NumaNode}} numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} \ {{- end}} env GODEBUG=madvdontneed=1 \ {{- if .MSMode}} PD_SERVICE_MODE=api \ {{- end}} bin/pd-server \ --name="{{.Name}}" \ --client-urls="{{.ClientURL}}" \ --advertise-client-urls="{{.AdvertiseClientURL}}" \ --peer-urls="{{.PeerURL}}" \ --advertise-peer-urls="{{.AdvertisePeerURL}}" \ --data-dir="{{.DataDir}}" \ --join="{{.Join}}" \ --config=conf/pd.toml \ --log-file="{{.LogDir}}/pd.log" 2>> "{{.LogDir}}/pd_stderr.log" tiup-1.16.3/embed/templates/scripts/run_prometheus.sh.tpl000066400000000000000000000027351505422223000235370ustar00rootroot00000000000000#!/bin/bash set -e DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! if [ -e "bin/ng-monitoring-server" ]; then echo "#!/bin/bash # WARNING: This file was auto-generated to restart ng-monitoring when fail. # Do not edit! All your edit might be overwritten! 
while true do {{- if .NumaNode}} numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/ng-monitoring-server \ {{- else}} bin/ng-monitoring-server \ {{- end}} --config {{.DeployDir}}/conf/ngmonitoring.toml \ >/dev/null 2>&1 sleep 15s done" > scripts/ng-wrapper.sh fi {{- if .EnableNG}} /bin/bash scripts/ng-wrapper.sh & {{- end}} exec > >(tee -i -a "{{.LogDir}}/prometheus.log") exec 2>&1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/prometheus/prometheus \ {{- else}} exec bin/prometheus/prometheus \ {{- end}} --config.file="{{.DeployDir}}/conf/prometheus.yml" \ --web.listen-address=":{{.Port}}" \ --web.external-url="{{.WebExternalURL}}/" \ --web.enable-admin-api \ --log.level="info" \ {{- if not .EnablePromAgentMode}} --storage.tsdb.path="{{.DataDir}}" \ {{- end}} {{- if .EnablePromAgentMode}} --enable-feature=agent \ {{- end}} {{- if .AdditionalArgs}} {{- range .AdditionalArgs}} {{.}} \ {{- end}} {{- end}} {{- if not .EnablePromAgentMode}} --storage.tsdb.retention="{{.Retention}}" {{- end}} tiup-1.16.3/embed/templates/scripts/run_pump.sh.tpl000066400000000000000000000011241505422223000223140ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/pump \ {{- else}} exec bin/pump \ {{- end}} {{- if .NodeID}} --node-id="{{.NodeID}}" \ {{- end}} --addr="{{.Addr}}" \ --advertise-addr="{{.AdvertiseAddr}}" \ --pd-urls="{{.PD}}" \ --data-dir="{{.DataDir}}" \ --log-file="{{.LogDir}}/pump.log" \ --config=conf/pump.toml 2>> "{{.LogDir}}/pump_stderr.log" tiup-1.16.3/embed/templates/scripts/run_scheduling.sh.tpl000066400000000000000000000013261505422223000234640ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} env GODEBUG=madvdontneed=1 bin/pd-server services scheduling\ {{- else}} exec env GODEBUG=madvdontneed=1 bin/pd-server services scheduling \ {{- end}} {{- if .Name}} --name="{{.Name}}" \ {{- end}} --backend-endpoints="{{.BackendEndpoints}}" \ --listen-addr="{{.ListenURL}}" \ --advertise-listen-addr="{{.AdvertiseListenURL}}" \ --config=conf/scheduling.toml \ --log-file="{{.LogDir}}/scheduling.log" 2>> "{{.LogDir}}/scheduling_stderr.log" tiup-1.16.3/embed/templates/scripts/run_tidb-dashboard.sh.tpl000066400000000000000000000013021505422223000242000ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tidb-dashboard \ {{- else}} exec bin/tidb-dashboard \ {{- end}} --feature-version="{{.TidbVersion}}" \ --host="{{.Host}}" \ --port="{{.Port}}" \ --pd="{{.PD}}" \ --data-dir="{{.DataDir}}" \ {{- if .TLSEnabled}} --tidb-ca tls/ca.crt \ --tidb-cert tls/tidb-dashboard.crt \ --tidb-key tls/tidb-dashboard.pem \ {{- end}} 1>> "{{.LogDir}}/tidb_dashboard.log" \ 2>> "{{.LogDir}}/tidb_dashboard_stderr.log" tiup-1.16.3/embed/templates/scripts/run_tidb.sh.tpl000066400000000000000000000016551505422223000222660ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if and .NumaNode .NumaCores}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} -C {{.NumaCores}} env GODEBUG=madvdontneed=1 bin/tidb-server \ {{- else if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} env GODEBUG=madvdontneed=1 bin/tidb-server \ {{- else}} exec env GODEBUG=madvdontneed=1 bin/tidb-server \ {{- end}} -P {{.Port}} \ --status="{{.StatusPort}}" \ --host="{{.ListenHost}}" \ --advertise-address="{{.AdvertiseAddr}}" \ --store="tikv" \ {{- if .SupportSecboot}} --initialize-insecure \ {{- end}} --path="{{.PD}}" \ --log-slow-query="{{.LogDir}}/tidb_slow_query.log" \ --config=conf/tidb.toml \ --log-file="{{.LogDir}}/tidb.log" 2>> "{{.LogDir}}/tidb_stderr.log" tiup-1.16.3/embed/templates/scripts/run_tiflash.sh.tpl000066400000000000000000000024341505422223000227720ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! cd "{{.DeployDir}}" || exit 1 export RUST_BACKTRACE=1 export TZ=${TZ:-/etc/localtime} export LD_LIBRARY_PATH={{.DeployDir}}/bin/tiflash:$LD_LIBRARY_PATH export MALLOC_CONF="prof:true,prof_active:false" {{- if .RequiredCPUFlags }} if [ -f "/proc/cpuinfo" ]; then IFS_OLD=$IFS IFS=' ' required_cpu_flags="{{.RequiredCPUFlags}}" for flag in $(echo $required_cpu_flags); do if grep -q ${flag} /proc/cpuinfo; then true else err_msg="Fail to check CPU flags: \`${flag}\` not supported. Require \`${required_cpu_flags}\`." echo ${err_msg} echo ${err_msg} >>"{{.LogDir}}/tiflash_stderr.log" exit -1 fi done IFS=$IFS_OLD fi {{- end}} echo -n 'sync ... 
' stat=$(time sync) echo ok echo $stat {{- if and .NumaNode .NumaCores}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} -C {{.NumaCores}} bin/tiflash/tiflash server \ {{- else if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tiflash/tiflash server \ {{- else}} exec bin/tiflash/tiflash server \ {{- end}} --config-file conf/tiflash.toml 2>> "{{.LogDir}}/tiflash_stderr.log" tiup-1.16.3/embed/templates/scripts/run_tikv-cdc.sh.tpl000066400000000000000000000014311505422223000230400ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tikv-cdc server \ {{- else}} exec bin/tikv-cdc server \ {{- end}} --addr "{{.Addr}}" \ --advertise-addr "{{.AdvertiseAddr}}" \ --pd "{{.PD}}" \ {{- if .DataDir}} --data-dir="{{.DataDir}}" \ {{- end}} {{- if .TLSEnabled}} --ca tls/ca.crt \ --cert tls/cdc.crt \ --key tls/cdc.pem \ {{- end}} {{- if .GCTTL}} --gc-ttl {{.GCTTL}} \ {{- end}} {{- if .TZ}} --tz "{{.TZ}}" \ {{- end}} --config conf/tikv-cdc.toml \ --log-file "{{.LogDir}}/tikv-cdc.log" 2>> "{{.LogDir}}/tikv-cdc_stderr.log" tiup-1.16.3/embed/templates/scripts/run_tikv.sh.tpl000066400000000000000000000016301505422223000223120ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! cd "{{.DeployDir}}" || exit 1 echo -n 'sync ... 
' stat=$(time sync || sync) echo ok echo $stat export MALLOC_CONF="prof:true,prof_active:false" {{- if and .NumaNode .NumaCores}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} -C {{.NumaCores}} bin/tikv-server \ {{- else if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tikv-server \ {{- else}} exec bin/tikv-server \ {{- end}} --addr "{{.Addr}}" \ --advertise-addr "{{.AdvertiseAddr}}" \ --status-addr "{{.StatusAddr}}" \ {{- if .SupportAdvertiseStatusAddr}} --advertise-status-addr "{{.AdvertiseStatusAddr}}" \ {{- end}} --pd "{{.PD}}" \ --data-dir "{{.DataDir}}" \ --config conf/tikv.toml \ --log-file "{{.LogDir}}/tikv.log" 2>> "{{.LogDir}}/tikv_stderr.log" tiup-1.16.3/embed/templates/scripts/run_tiproxy.sh.tpl000066400000000000000000000005321505422223000230530ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} bin/tiproxy \ {{- else}} exec bin/tiproxy \ {{- end}} --config conf/tiproxy.toml tiup-1.16.3/embed/templates/scripts/run_tso.sh.tpl000066400000000000000000000012631505422223000221440ustar00rootroot00000000000000#!/bin/bash set -e # WARNING: This file was auto-generated. Do not edit! # All your edit might be overwritten! 
DEPLOY_DIR={{.DeployDir}} cd "${DEPLOY_DIR}" || exit 1 {{- if .NumaNode}} exec numactl --cpunodebind={{.NumaNode}} --membind={{.NumaNode}} env GODEBUG=madvdontneed=1 bin/pd-server services tso\ {{- else}} exec env GODEBUG=madvdontneed=1 bin/pd-server services tso \ {{- end}} {{- if .Name}} --name="{{.Name}}" \ {{- end}} --backend-endpoints="{{.BackendEndpoints}}" \ --listen-addr="{{.ListenURL}}" \ --advertise-listen-addr="{{.AdvertiseListenURL}}" \ --config=conf/tso.toml \ --log-file="{{.LogDir}}/tso.log" 2>> "{{.LogDir}}/tso_stderr.log" tiup-1.16.3/embed/templates/scripts/spark-env.sh.tpl000077500000000000000000000106661505422223000223730ustar00rootroot00000000000000#!/usr/bin/env bash # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This file is sourced when running various Spark programs. # Copy it as spark-env.sh and edit that to configure Spark for your site. 
# Options read when launching programs locally with # ./bin/run-example or ./bin/spark-submit # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node # - SPARK_PUBLIC_DNS, to set the public dns name of the driver program # - SPARK_CLASSPATH, default classpath entries to append # Options read by executors and drivers running inside the cluster # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node # - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program # - SPARK_CLASSPATH, default classpath entries to append # - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data # - MESOS_NATIVE_JAVA_LIBRARY, to point to your libmesos.so if you use Mesos # Options read in YARN client mode # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files # - SPARK_EXECUTOR_INSTANCES, Number of executors to start (Default: 2) # - SPARK_EXECUTOR_CORES, Number of cores for the executors (Default: 1). # - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G) # - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G) # Options for the daemons used in the standalone deploy mode # - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname # - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master # - SPARK_MASTER_OPTS, to set config properties only for the master (e.g. "-Dx=y") # - SPARK_WORKER_CORES, to set the number of cores to use on this machine # - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g) # - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT, to use non-default ports for the worker # - SPARK_WORKER_INSTANCES, to set the number of worker processes per node # - SPARK_WORKER_DIR, to set the working directory of worker processes # - SPARK_WORKER_OPTS, to set config properties only for the worker (e.g. 
"-Dx=y") # - SPARK_DAEMON_MEMORY, to allocate to the master, worker and history server themselves (default: 1g). # - SPARK_HISTORY_OPTS, to set config properties only for the history server (e.g. "-Dx=y") # - SPARK_SHUFFLE_OPTS, to set config properties only for the external shuffle service (e.g. "-Dx=y") # - SPARK_DAEMON_JAVA_OPTS, to set config properties for all daemons (e.g. "-Dx=y") # - SPARK_PUBLIC_DNS, to set the public dns name of the master or workers # Generic options for the daemons used in the standalone deploy mode # - SPARK_CONF_DIR Alternate conf dir. (Default: ${SPARK_HOME}/conf) # - SPARK_LOG_DIR Where log files are stored. (Default: ${SPARK_HOME}/logs) # - SPARK_PID_DIR Where the pid file is stored. (Default: /tmp) # - SPARK_IDENT_STRING A string representing this instance of spark. (Default: $USER) # - SPARK_NICENESS The scheduling priority for daemons. (Default: 0) # - SPARK_NO_DAEMONIZE Run the proposed command in the foreground. It will not output a PID file. #export JAVA_HOME, to set jdk home {{ range $k, $v := .CustomEnvs}} {{ $k }}={{ $v }} {{- end }} {{- if .TiSparkMaster}} SPARK_MASTER_HOST={{.TiSparkMaster}} {{- end}} {{- if ne .MasterPort 0}} SPARK_MASTER_PORT={{.MasterPort}} {{- end}} {{- if ne .MasterUIPort 0}} SPARK_MASTER_WEBUI_PORT={{.MasterUIPort}} {{- end}} {{- if ne .WorkerPort 0}} SPARK_WORKER_PORT={{.WorkerPort}} {{- end}} {{- if ne .WorkerUIPort 0}} SPARK_WORKER_WEBUI_PORT={{.WorkerUIPort}} {{- end}} {{- if ne .TiSparkLocalIP ""}} SPARK_LOCAL_IP={{.TiSparkLocalIP}} {{- end}} SPARK_PUBLIC_DNS={{.Host}} tiup-1.16.3/embed/templates/scripts/start_tispark_slave.sh.tpl000077500000000000000000000062101505422223000245370ustar00rootroot00000000000000#!/usr/bin/env bash # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
# The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Starts a slave on the machine this script is executed on. # # Environment Variables # # SPARK_WORKER_INSTANCES The number of worker instances to run on this # slave. Default is 1. # SPARK_WORKER_PORT The base port number for the first worker. If set, # subsequent workers will increment this number. If # unset, Spark will find a valid port number, but # with no guarantee of a predictable pattern. # SPARK_WORKER_WEBUI_PORT The base port for the web interface of the first # worker. Subsequent workers will increment this # number. Default is 8081. if [ -z "${SPARK_HOME}" ]; then export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)" fi # NOTE: This exact class name is matched downstream by SparkSubmit. # Any changes need to be reflected there. CLASS="org.apache.spark.deploy.worker.Worker" if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then echo "Usage: ./sbin/start-slave.sh [options] " pattern="Usage:" pattern+="\|Using Spark's default log4j profile:" pattern+="\|Registered signal handlers for" "${SPARK_HOME}"/bin/spark-class $CLASS --help 2>&1 | grep -v "$pattern" 1>&2 exit 1 fi . "${SPARK_HOME}/sbin/spark-config.sh" . 
"${SPARK_HOME}/bin/load-spark-env.sh" # First argument should be the master; we need to store it aside because we may # need to insert arguments between it and the other arguments {{- if .TiSparkMaster}} MASTER=spark://{{.TiSparkMaster}}:{{.MasterPort}} shift {{- end}} # Determine desired worker port if [ "$SPARK_WORKER_WEBUI_PORT" = "" ]; then SPARK_WORKER_WEBUI_PORT=8081 fi # Start up the appropriate number of workers on this machine. # quick local function to start a worker function start_instance { WORKER_NUM=$1 shift if [ "$SPARK_WORKER_PORT" = "" ]; then PORT_FLAG= PORT_NUM= else PORT_FLAG="--port" PORT_NUM=$(( $SPARK_WORKER_PORT + $WORKER_NUM - 1 )) fi WEBUI_PORT=$(( $SPARK_WORKER_WEBUI_PORT + $WORKER_NUM - 1 )) "${SPARK_HOME}/sbin"/spark-daemon.sh start $CLASS $WORKER_NUM \ --webui-port "$WEBUI_PORT" $PORT_FLAG $PORT_NUM $MASTER "$@" } if [ "$SPARK_WORKER_INSTANCES" = "" ]; then start_instance 1 "$@" else for ((i=0; i<$SPARK_WORKER_INSTANCES; i++)); do start_instance $(( 1 + $i )) "$@" done fi tiup-1.16.3/embed/templates/systemd/000077500000000000000000000000001505422223000173205ustar00rootroot00000000000000tiup-1.16.3/embed/templates/systemd/system.service.tpl000066400000000000000000000022501505422223000230230ustar00rootroot00000000000000[Unit] Description={{.ServiceName}} service After=syslog.target network.target remote-fs.target nss-lookup.target [Service] {{- if .MemoryLimit}} MemoryLimit={{.MemoryLimit}} {{- end}} {{- if .CPUQuota}} CPUQuota={{.CPUQuota}} {{- end}} {{- if .IOReadBandwidthMax}} IOReadBandwidthMax={{.IOReadBandwidthMax}} {{- end}} {{- if .IOWriteBandwidthMax}} IOWriteBandwidthMax={{.IOWriteBandwidthMax}} {{- end}} {{- if .LimitCORE}} LimitCORE={{.LimitCORE}} {{- end}} LimitNOFILE=1000000 LimitSTACK=10485760 {{- if and .GrantCapNetRaw (eq .SystemdMode "system")}} AmbientCapabilities=CAP_NET_RAW {{- end}} {{- if eq .SystemdMode "system"}} User={{.User}} {{- end}} ExecStart=/bin/bash -c '{{.DeployDir}}/scripts/run_{{.ServiceName}}.sh' {{- 
if eq .ServiceName "prometheus"}} ExecReload=/bin/bash -c 'kill -HUP $MAINPID $(pidof {{.DeployDir}}/bin/ng-monitoring-server)' {{end}} {{- if .Restart}} Restart={{.Restart}} {{else}} Restart=always {{end}} RestartSec=15s {{- if .DisableSendSigkill}} SendSIGKILL=no {{- end}} {{- if .TimeoutStartSec}} TimeoutStartSec={{.TimeoutStartSec}} {{- end}} {{- if .TimeoutStopSec}} TimeoutStopSec={{.TimeoutStopSec}} {{- end}} [Install] WantedBy=multi-user.target tiup-1.16.3/embed/templates/systemd/tispark.service.tpl000066400000000000000000000007731505422223000231640ustar00rootroot00000000000000[Unit] Description={{.ServiceName}} service After=syslog.target network.target remote-fs.target nss-lookup.target [Service] User={{.User}} {{- if ne .JavaHome ""}} Environment="JAVA_HOME={{.JavaHome}}" {{- end}} ExecStart=/bin/bash -c '{{.DeployDir}}/sbin/start-{{.ServiceName}}.sh' ExecStop=/bin/bash -c '{{.DeployDir}}/sbin/stop-{{.ServiceName}}.sh' Type=forking {{- if .Restart}} Restart={{.Restart}} {{else}} Restart=always {{- end}} RestartSec=15s SendSIGKILL=no [Install] WantedBy=multi-user.target tiup-1.16.3/examples000077700000000000000000000000001505422223000171272embed/examplesustar00rootroot00000000000000tiup-1.16.3/go.mod000066400000000000000000000141011505422223000136610ustar00rootroot00000000000000module github.com/pingcap/tiup go 1.24 toolchain go1.24.1 require ( github.com/AstroProfundis/sysinfo v0.0.0-20240112160158-ed54df16e9ce github.com/BurntSushi/toml v1.5.0 github.com/ScaleFT/sshkeys v1.2.0 github.com/appleboy/easyssh-proxy v1.3.10-0.20211209134747-6671f69d85f5 github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef github.com/cavaliergopher/grab/v3 v3.0.1 github.com/cheggaaa/pb/v3 v3.1.2 github.com/creasty/defaults v1.7.0 github.com/docker/go-units v0.5.0 github.com/fatih/color v1.18.0 github.com/gibson042/canonicaljson-go v1.0.3 github.com/gizak/termui/v3 v3.1.0 github.com/go-sql-driver/mysql v1.7.1 github.com/gofrs/flock v0.8.1 github.com/gogo/protobuf 
v1.3.2 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 github.com/jedib0t/go-pretty/v6 v6.6.7 github.com/jeremywohl/flatten v1.0.1 github.com/joomcode/errorx v1.1.0 github.com/lorenzosaino/go-sysctl v0.3.1 github.com/mattn/go-runewidth v0.0.16 github.com/minio/minio-go/v7 v7.0.52 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db github.com/pingcap/errors v0.11.5-0.20250523034308-74f78ae071ee github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 github.com/pingcap/fn v1.0.0 github.com/pingcap/kvproto v0.0.0-20230331024443-349815129e6d github.com/prometheus/client_model v0.3.0 github.com/prometheus/common v0.42.0 github.com/prometheus/prom2json v1.3.2 github.com/r3labs/diff/v3 v3.0.1 github.com/relex/aini v1.5.0 github.com/sergi/go-diff v1.3.1 github.com/sethvargo/go-password v0.2.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.10.0 github.com/vishvananda/netlink v0.0.0-20210530105856-14e832ae1e8f github.com/xo/usql v0.9.5 go.etcd.io/etcd/client/pkg/v3 v3.5.7 go.etcd.io/etcd/client/v3 v3.5.7 go.uber.org/atomic v1.11.0 go.uber.org/zap v1.24.0 golang.org/x/crypto v0.36.0 golang.org/x/mod v0.24.0 golang.org/x/sync v0.12.0 golang.org/x/sys v0.31.0 golang.org/x/term v0.30.0 golang.org/x/text v0.23.0 google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 google.golang.org/grpc v1.67.3 google.golang.org/protobuf v1.36.1 gopkg.in/ini.v1 v1.67.0 gopkg.in/yaml.v3 v3.0.1 software.sslmate.com/src/go-pkcs12 v0.2.0 ) require ( cloud.google.com/go v0.116.0 // indirect cloud.google.com/go/bigquery v1.64.0 // indirect cloud.google.com/go/spanner v1.73.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/alecthomas/chroma v0.9.4 // indirect github.com/apache/thrift v0.17.0 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/chavacava/garif v0.1.0 // indirect github.com/chzyer/test v1.0.0 // 
indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect github.com/dlclark/regexp2 v1.4.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/gohxs/readline v0.0.0-20171011095936-a780388e6e7c // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.7 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect github.com/mgechev/revive v1.7.0 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nathan-fiscaletti/consolesize-go v0.0.0-20210105204122-a87d9f614b9d // indirect github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/xid v1.4.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect 
github.com/spf13/afero v1.14.0 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.6.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xo/dburl v0.9.0 // indirect github.com/xo/tblfmt v0.8.0 // indirect github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect github.com/zaf/temp v0.0.0-20170209143821-94e385923345 // indirect go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.uber.org/goleak v1.2.1 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/net v0.37.0 // indirect golang.org/x/tools v0.31.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/api v0.203.0 // indirect google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect honnef.co/go/tools v0.4.3 // indirect lukechampine.com/uint128 v1.3.0 // indirect modernc.org/sqlite v1.21.2 // indirect modernc.org/token v1.1.0 // indirect ) tool ( github.com/mgechev/revive github.com/pingcap/failpoint/failpoint-ctl ) tiup-1.16.3/go.sum000066400000000000000000002072341505422223000137210ustar00rootroot00000000000000cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y= cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= 
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.64.0 h1:vSSZisNyhr2ioJE1OuYBQrnrpB7pIhRQm4jfjc7E/js= cloud.google.com/go/bigquery v1.64.0/go.mod h1:gy8Ooz6HF7QmA+TRtX8tZmXBKH5mCFBwUApGAb3zI7Y= cloud.google.com/go/compute v1.29.0 h1:Lph6d8oPi38NHkOr6S55Nus/Pbbcp37m/J0ohgKAefs= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc= cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= cloud.google.com/go/spanner v1.73.0 h1:0bab8QDn6MNj9lNK6XyGAVFhMlhMU2waePPa6GZNoi8= cloud.google.com/go/spanner v1.73.0/go.mod h1:mw98ua5ggQXVWwp83yjwggqEmW9t8rjs9Po1ohcUGW4= github.com/AstroProfundis/sysinfo v0.0.0-20240112160158-ed54df16e9ce h1:gZeTM/cITaHpJzdV1kMfq/XdEMQPYv8rdeVueGrbCkY= github.com/AstroProfundis/sysinfo v0.0.0-20240112160158-ed54df16e9ce/go.mod h1:InOUjgR40M4ZG3lDfn8taRJxzzvU8FTF/v6IBNKXjyQ= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm 
v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/ClickHouse/clickhouse-go v1.5.1 h1:I8zVFZTz80crCs0FFEBJooIxsPcV0xfthzK1YrkpJTc= github.com/ClickHouse/clickhouse-go v1.5.1/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 h1:oVLqHXhnYtUwM89y9T1fXGaK9wTkXHgNp8/ZNMQzUxE= github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= github.com/IBM/nzgo v0.0.0-20210406171630-186d127e2795 h1:W9EhSwnh9MrP4MW89gKCoe/t59uZkDL3bGMTirPyvyI= github.com/IBM/nzgo v0.0.0-20210406171630-186d127e2795/go.mod h1:n1QK6KJjNa8fe+HQPynW+mJpRpkffLXMO8LR9Nja0JU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/MichaelS11/go-cql-driver v0.1.1 h1:ntFKov/39Tl36HckP4tzld3XMeyDYHHO00MiZNdoL1A= github.com/MichaelS11/go-cql-driver v0.1.1/go.mod h1:rMwGk5bMWiYI/If6r6dbqEfZG6nQLvqJHTplv5yTDaw= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod 
h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/SAP/go-hdb v0.105.5 h1:mop9KOZU1ro9PjJLgqseHUZzwoOJh4d7f8Nrvf1a2Uo= github.com/SAP/go-hdb v0.105.5/go.mod h1:xbtJDvjqm9MQhIAnalynGNAbqxolS9W02qQo/vBqyaU= github.com/ScaleFT/sshkeys v0.0.0-20200327173127-6142f742bca5/go.mod h1:gxOHeajFfvGQh/fxlC8oOKBe23xnnJTif00IFFbiT+o= github.com/ScaleFT/sshkeys v1.2.0 h1:5BRp6rTVIhJzXT3VcUQrKgXR8zWA3sOsNeuyW15WUA8= github.com/ScaleFT/sshkeys v1.2.0/go.mod h1:gxOHeajFfvGQh/fxlC8oOKBe23xnnJTif00IFFbiT+o= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/VoltDB/voltdb-client-go v1.0.8 h1:Xhf4LREQumXaRSUixMt1eWea6+0dz/Yo9mkqyEULbj8= github.com/VoltDB/voltdb-client-go v1.0.8/go.mod h1:mMhb5zwkT46Ef3NvkFqt+kX0j+ltQ2Sdqj9+ICq+Yto= github.com/alecthomas/chroma v0.9.4 h1:YL7sOAE3p8HS96T9km7RgvmsZIctqbK1qJ0b7hzed44= github.com/alecthomas/chroma v0.9.4/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s= github.com/alexbrainman/odbc v0.0.0-20210605012845-39f8520b0d5f h1:qJp6jWdG+PBNCDtIwRpspahMaZ3hlfde/25ExBORKso= github.com/alexbrainman/odbc v0.0.0-20210605012845-39f8520b0d5f/go.mod h1:c5eyz5amZqTKvY3ipqerFO/74a/8CYmXOahSr40c+Ww= github.com/amsokol/ignite-go-client v0.12.2 h1:q4Mr+UUiKVnR7ykjR1YARVS5jp+ZU6ekCIs0V4WgFDo= github.com/amsokol/ignite-go-client v0.12.2/go.mod h1:K3tKJGcLQORFD+ds7f0f9fl88tv0KZcpfuNhzRyuLVE= github.com/apache/arrow/go/arrow v0.0.0-20210901201644-e9eeff1c9297 h1:c4n5hcewl0g/LOJMGz+nvMnru5G2AE0lWwK5cKQ63zo= github.com/apache/arrow/go/arrow v0.0.0-20210901201644-e9eeff1c9297/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= github.com/apache/arrow/go/v15 v15.0.2 
h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE= github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= github.com/apache/calcite-avatica-go/v5 v5.0.0 h1:B0l4O6KGR/sRHDaAgFT+AdGFouvp5Y/B9/82ag5rhds= github.com/apache/calcite-avatica-go/v5 v5.0.0/go.mod h1:mbmgGJJQk6WZfh7U9z0QOa2OjS4L9w5bVMj2LD6wwVM= github.com/apache/thrift v0.17.0 h1:cMd2aj52n+8VoAtvSvLn4kDC3aZ6IAkBuqWQ2IDu7wo= github.com/apache/thrift v0.17.0/go.mod h1:OLxhMRJxomX+1I/KUw03qoV3mMz16BwaKI+d4fPBx7Q= github.com/appleboy/easyssh-proxy v1.3.10-0.20211209134747-6671f69d85f5 h1:4YNuL/4gurMQu1r10vp9kuD3X2z0Nn4VayQpmIjoJlY= github.com/appleboy/easyssh-proxy v1.3.10-0.20211209134747-6671f69d85f5/go.mod h1:SnOlkuIAoaj8FvRIGcWp2BbLOwvHZnNNOva+DBMbHWU= github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef h1:2JGTg6JapxP9/R33ZaagQtAM4EkkSYnIAlOG5EI8gkM= github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef/go.mod h1:JS7hed4L1fj0hXcyEejnW57/7LCetXggd+vwrRnYeII= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.42.9 h1:8ptAGgA+uC2TUbdvUeOVSfBocIZvGE2NKiLxkAcn1GA= github.com/aws/aws-sdk-go v1.42.9/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v1.9.0 h1:+S+dSqQCN3MSU5vJRu1HqHrq00cJn6heIMU7X9hcsoo= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/credentials v1.4.0 h1:kmvesfjY861FzlCU9mvAfe01D9aeXcG2ZuC+k9F2YLM= github.com/aws/aws-sdk-go-v2/credentials v1.4.0/go.mod h1:dgGR+Qq7Wjcd4AOAW5Rf5Tnv3+x7ed6kETXyS9WCuAY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.0 h1:zzH1xd1/PX7bFO4/BQwVQP+UcBfYieI1sMH9DA68xZY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.0/go.mod h1:fPU0eFGnS47RyKHHs8BNcCKOm5oOA5xm0BlrZWsQT/A= 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0 h1:VNJ5NLBteVXEwE2F1zEXVmyIH58mZ6kIQGJoC7C+vkg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.0/go.mod h1:R1KK+vY8AfalhG1AOu5e35pOD2SdoPKQCFLTvnxiohk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.6.0 h1:B/1pIeV/oFnrOwhoMA6ASX+qT4FzMqn1MYsPiIXgMqQ= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.6.0/go.mod h1:LKb3cKNQIMh+itGnEpKGcnL/6OIjPZqrtYah1w5f+3o= github.com/aws/aws-sdk-go-v2/service/s3 v1.14.0 h1:nR9j0xMxpXk6orC/C03fbHNrbb1NaXp8LdVV7V1oVLE= github.com/aws/aws-sdk-go-v2/service/s3 v1.14.0/go.mod h1:Qit9H3zjAmF7CLHOkrepE9b2ndX/2l3scstsM5g2jSk= github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beltran/gohive v1.5.1 h1:lsOIka3miPuzz1s39KQZNXwlf90QKekBC6ZsuRUVlcU= github.com/beltran/gohive v1.5.1/go.mod h1:BJbXAhof7gWk5+kl0y6Ox8TFDJ1xv6gwDksF7l15LFI= github.com/beltran/gosasl v0.0.0-20210629234946-b41ac5bb612a h1:bOmOqN7GrefYKBHR7KIILIQHjyqYB9h5hluINvBUY6I= github.com/beltran/gosasl v0.0.0-20210629234946-b41ac5bb612a/go.mod h1:Qx8cW6jkI8riyzmklj80kAIkv+iezFUTBiGU0qHhHes= github.com/beltran/gssapi v0.0.0-20200324152954-d86554db4bab h1:ayfcn60tXOSYy5zUN1AMSTQo4nJCf7hrdzAVchpPst4= github.com/beltran/gssapi v0.0.0-20200324152954-d86554db4bab/go.mod h1:GLe4UoSyvJ3cVG+DVtKen5eAiaD8mAJFuV5PT3Eeg9Q= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bippio/go-impala v2.1.0+incompatible h1:H/N1Ms5KhVa2IoRZ6NO9ZzBryfTGhNiLG/zmNXS0CHY= github.com/bippio/go-impala 
v2.1.0+incompatible/go.mod h1:lcyV/9s/ri5lFj3zdyyneQwDso8/Fd62fELt05Wts8g= github.com/btnguyen2k/consu/gjrc v0.1.1 h1:2ZXT2ySAFt5yJbdR2BAbwRKl5OjcysJeg3pzF4Hw5bE= github.com/btnguyen2k/consu/gjrc v0.1.1/go.mod h1:jOTI8M8Kkly9fHyeSoBTw3o2Q9ayQEEYTHQfedIEqzE= github.com/btnguyen2k/consu/olaf v0.1.3 h1:0dWWmN5nOB/9pJdo7o1S3wR2+l3kG7pXHv3Vwki8uNM= github.com/btnguyen2k/consu/olaf v0.1.3/go.mod h1:6ybEnJcdcK/PNiSfkKnMoxYuKyH2vJPBvHRuuZpPvD8= github.com/btnguyen2k/consu/reddo v0.1.7 h1:YhU/UWSzUki3WUi31lsByx+OWNCKs9uKjiy8G7MmwW0= github.com/btnguyen2k/consu/reddo v0.1.7/go.mod h1:pdY5oIVX3noZIaZu3nvoKZ59+seXL/taXNGWh9xJDbg= github.com/btnguyen2k/consu/semita v0.1.5 h1:fu71xNJTbCV8T+6QPJdJu3bxtmLWvTjCepkvujF74+I= github.com/btnguyen2k/consu/semita v0.1.5/go.mod h1:fksCe3L4kxiJVnKKhUXKI8mcFdB9974mtedwUVVFu1M= github.com/btnguyen2k/gocosmos v0.1.4 h1:9R12Hvk5/6HV9HYNLr103z0LNzWt9FJTRDaDPSV8lao= github.com/btnguyen2k/gocosmos v0.1.4/go.mod h1:BYrAnNLG+e5NCz9q0X9dkjhMM1XkYhyRG74bQX4Wb1w= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4= github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chavacava/garif v0.1.0 
h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= github.com/cheggaaa/pb/v3 v3.1.2 h1:FIxT3ZjOj9XJl0U4o2XbEhjFfZl7jCVCDOGq1ZAB7wQ= github.com/cheggaaa/pb/v3 v3.1.2/go.mod h1:SNjnd0yKcW+kw0brSusraeDd5Bf1zBfxAzTL2ss3yQ4= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cloudspannerecosystem/go-sql-spanner v0.0.0-20211102174514-f5f9ecd7ca01 h1:rCois7yiTyOqN0+OzK02yba6p4jAzDx7nyzFsAtY7TY= github.com/cloudspannerecosystem/go-sql-spanner v0.0.0-20211102174514-f5f9ecd7ca01/go.mod h1:TIVV66EXen7aCX7vJg4akbc4BwkYC+kn0RADJk0h73s= github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs= github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/couchbase/go-couchbase v0.1.1 
h1:ClFXELcKj/ojyoTYbsY34QUrrYCBi/1G749sXSCkdhk= github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A= github.com/couchbase/go_n1ql v0.0.0-20160215142504-6cf4e348b127 h1:6Y3osD8rWOIAu/z5lrBns2cZSKSr6s+GEX64Ii0JtuI= github.com/couchbase/go_n1ql v0.0.0-20160215142504-6cf4e348b127/go.mod h1:Rn19fO9CVfhJkqyIED9ixL5Kh5XuH7hXgDTxyfGY7hM= github.com/couchbase/gomemcached v0.1.4 h1:5n5wmr4dBu+X7XteP8QHP5S9inK9MBjNpN9b7WSQfuA= github.com/couchbase/gomemcached v0.1.4/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.2 h1:gWr8B6XNWPIhfalHNog3qQKfGiYyh4K4VhO3P2o9BCs= github.com/couchbase/goutils v0.1.2/go.mod h1:h89Ek/tiOxxqjz30nPPlwZdQbdB8BwgnuBxeoUe/ViE= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creasty/defaults v1.7.0 h1:eNdqZvc5B509z18lD8yc212CAqJNvfT1Jq6L8WowdBA= github.com/creasty/defaults v1.7.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a h1:saTgr5tMLFnmy/yg3qDTft4rE5DY2uJ/cCxCe3q0XTU= github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a/go.mod h1:Bw9BbhOJVNR+t0jCqx2GC6zv0TGBsShs56Y3gfSCvl0= github.com/denisenkom/go-mssqldb v0.11.0 h1:9rHa233rhdOyrz2GcP9NM+gi2psgJZ4GWDpL/7ND8HI= github.com/denisenkom/go-mssqldb v0.11.0/go.mod 
h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/exasol/error-reporting-go v0.1.0 h1:p81VbfknFLJHT1jnAlSa1iz3Ysx8sMSJnVnNlqarVqQ= github.com/exasol/error-reporting-go v0.1.0/go.mod h1:im4x2ucQTPXFn+/B1EFcya6dnSUNYsuf6wC0DefHLK4= github.com/exasol/exasol-driver-go v0.2.0 h1:ASHugf1q5otYj2uqk3/ezKrQ6/kytqYEuBzw9arH21c= github.com/exasol/exasol-driver-go v0.2.0/go.mod h1:3plWWycl0TqHpTY6rQxNImIIk5iLPfVfaRhwsPnP8rs= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod 
h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/genjidb/genji v0.13.0 h1:WKjuoy4TRemCxVgcZ1/+POGKhjkDj3ZSTB/zlHmS7l0= github.com/genjidb/genji v0.13.0/go.mod h1:2QAT8ZovhZiEDQCaeOMI4HW5UiLm6PmXUxlUiKEuSjU= github.com/gibson042/canonicaljson-go v1.0.3 h1:EAyF8L74AWabkyUmrvEFHEt/AGFQeD6RfwbAuf0j1bI= github.com/gibson042/canonicaljson-go v1.0.3/go.mod h1:DsLpJTThXyGNO+KZlI85C1/KDcImpP67k/RKVjcaEqo= github.com/gizak/termui/v3 v3.1.0 h1:ZZmVDgwHl7gR7elfKf1xc4IudXZ5qqfDh4wExk4Iajc= github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/errors v0.20.1 h1:j23mMDtRxMwIobkpId7sWh7Ddcx4ivaoqUbfXx5P+a8= github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/strfmt v0.21.0 h1:hX2qEZKmYks+t0hKeb4VTJpUm2UYsdL3+DCid5swxIs= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= 
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/gocql/gocql v0.0.0-20211015133455-b225f9b53fa1 h1:px9qUCy/RNJNsfCam4m2IxWGxNuimkrioEF0vrrbPsg= github.com/gocql/gocql v0.0.0-20211015133455-b225f9b53fa1/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.25.3 h1:ltL94Ct9otjMfUNTRMqyZh0GpepPd9f9pyFgtUciT9k= github.com/godror/godror v0.25.3/go.mod h1:JgtdZ1iSaNoioa/B53BVVWji9J9iGPDDj2763T5d1So= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gohxs/readline v0.0.0-20171011095936-a780388e6e7c h1:yE35fKFwcelIte3q5q1/cPiY7pI7vvf5/j/0ddxNCKs= github.com/gohxs/readline v0.0.0-20171011095936-a780388e6e7c/go.mod h1:9S/fKAutQ6wVHqm1jnp9D9sc5hu689s9AaTWFS92LaU= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 
h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU= github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= 
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs= github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570= github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA= github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= 
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= github.com/jedib0t/go-pretty/v6 v6.6.7 h1:m+LbHpm0aIAPLzLbMfn8dc3Ht8MW7lsSO4MPItz/Uuo= github.com/jedib0t/go-pretty/v6 v6.6.7/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU= github.com/jeremywohl/flatten v1.0.1 h1:LrsxmB3hfwJuE+ptGOijix1PIfOoKLJ3Uee/mzbgtrs= github.com/jeremywohl/flatten v1.0.1/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO0nAlMHgfLQ= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmrobles/h2go v0.5.0 h1:r+V3J1+8z5tExKHcVc8u0tXJfov391zEffJYALWKhA0= github.com/jmrobles/h2go v0.5.0/go.mod h1:p7Vjfu/9f7g2RI1CkpwXnwqskV+47HviBg4C4FlW8eI= github.com/joomcode/errorx v1.1.0 h1:dizuSG6yHzlvXOOGHW00gwsmM4Sb9x/yWEfdtPztqcs= github.com/joomcode/errorx v1.1.0/go.mod h1:eQzdtdlNyN7etw6YCS4W4+lu442waxZYw5yvz0ULrRo= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lorenzosaino/go-sysctl v0.3.1 h1:3phX80tdITw2fJjZlwbXQnDWs4S30beNcMbw0cn0HtY= github.com/lorenzosaino/go-sysctl v0.3.1/go.mod h1:5grcsBRpspKknNS1qzt1eIeRDLrhpKZAtz8Fcuvs1Rc= github.com/mattn/go-adodb v0.0.1 h1:g/pk3V8m/WFX2IQRI58wAC24OQUFFXEiNsvs7dQ1WKg= github.com/mattn/go-adodb v0.0.1/go.mod h1:jaSTRde4bohMuQgYQPxW3xRTPtX/cZKyxPrFVseJULo= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.20 
h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= github.com/mgechev/revive v1.7.0 h1:JyeQ4yO5K8aZhIKf5rec56u0376h8AlKNQEmjfkjKlY= github.com/mgechev/revive v1.7.0/go.mod h1:qZnwcNhoguE58dfi96IJeSTPeZQejNeoMQLUZGi4SW4= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.52 h1:8XhG36F6oKQUDDSuz6dY3rioMzovKjW40W6ANuN0Dps= github.com/minio/minio-go/v7 v7.0.52/go.mod h1:IbbodHyjUAguneyucUaahv+VMNs/EOTV9du7A7/Z3HU= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= github.com/mitchellh/colorstring 
v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mithrandie/csvq v1.15.2 h1:m64b9WFGN7ACxnK3zWHEr+XPu+dvl3MnFzokaoHefPA= github.com/mithrandie/csvq v1.15.2/go.mod h1:90MXOaH60IGYnudlDViKO3NZr5/HV8L4vqD5bf7re1E= github.com/mithrandie/csvq-driver v1.4.3 h1:BmAprI8cZzXrhldfVj1+XEVJuGbMaeXxWNOGZ1pRrOU= github.com/mithrandie/csvq-driver v1.4.3/go.mod h1:K9H6vQEQnB/ryujBiUfRbuYAawtrAzadnRV1z/BnFBA= github.com/mithrandie/go-file/v2 v2.0.2 h1:3/yzItlTssDX9wOZrj9MtRyXbr52OZURmXFMuvpJ6Fg= github.com/mithrandie/go-file/v2 v2.0.2/go.mod h1:98a9loPjYr7ffsfwMDdJ7iH/dO8EXAca8XiI6SZpPV8= github.com/mithrandie/go-text v1.4.2 h1:TPL4rDywSn796sRaE4U73xIW+0AYgwiALnBrP/pRHMs= github.com/mithrandie/go-text v1.4.2/go.mod h1:Ivtyn3cuJYAs2UpihWmnY1KAixmLonELJ454EOZGzDg= github.com/mithrandie/readline-csvq v1.1.1 h1:kp9W5WPUAB+NOgW5axPdu8mZe1M9CP/D1xpabj39JVY= github.com/mithrandie/readline-csvq v1.1.1/go.mod h1:eOJt0j6UI9lhwM/KP+v40ugarhXsnPIXStvkfIaq79E= github.com/mithrandie/ternary v1.1.0 h1:BlN8EoTsIYjhuWkfXHrh7+G+/Y0VvvWGVVldyjNH2VU= github.com/mithrandie/ternary v1.1.0/go.mod h1:0D9Ba3+09K2TdSZO7/bFCC0GjSXetCvYuYq0u8FY/1g= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/nakagami/firebirdsql v0.9.3 h1:/uNYufsFSRZF6n1xqzQxXY6b2TRD8o4aiPlb+qYGQH8= github.com/nakagami/firebirdsql v0.9.3/go.mod h1:yU71hYllTfU4JbEysWLY2XovxsFnaLWkraaegJ7GW3M= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20210105204122-a87d9f614b9d h1:PQW4Aqovdqc9efHl9EVA+bhKmuZ4ME1HvSYYDvaDiK0= github.com/nathan-fiscaletti/consolesize-go v0.0.0-20210105204122-a87d9f614b9d/go.mod h1:cxIIfNMTwff8f/ZvRouvWYF6wOoO7nj99neWSx2q/Es= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d h1:x3S6kxmy49zXVVyhcnrFqxvNVCBPb2KZ9hV2RBdS840= github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v1.0.0-rc93 
h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/ory/dockertest/v3 v3.6.3 h1:L8JWiGgR+fnj90AEOkTFIEp4j5uWAK72P3IUsYgn2cs= github.com/ory/dockertest/v3 v3.6.3/go.mod h1:EFLcVUOl8qCwp9NyDAcCDtq/QviLtYswW/VbWzUnTNE= github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pingcap/errors v0.11.5-0.20250523034308-74f78ae071ee h1:/IDPbpzkzA97t1/Z1+C3KlxbevjMeaI6BQYxvivu4u8= github.com/pingcap/errors v0.11.5-0.20250523034308-74f78ae071ee/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXHoBitzdebTvOjs/swniBTOLy5XiMtuE= github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4= github.com/pingcap/fn v1.0.0 h1:CyA6AxcOZkQh52wIqYlAmaVmF6EvrcqFywP463pjA8g= github.com/pingcap/fn v1.0.0/go.mod h1:u9WZ1ZiOD1RpNhcI42RucFh/lBuzTu6rw88a+oF2Z24= github.com/pingcap/kvproto v0.0.0-20230331024443-349815129e6d h1:QR9Gk/Hi7DU399ec81cG7b3X/Umwv8FIcyx5WwD+O7M= github.com/pingcap/kvproto v0.0.0-20230331024443-349815129e6d/go.mod h1:RjuuhxITxwATlt5adgTedg3ehKk01M03L1U4jNHdeeQ= github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2 h1:acNfDZXmm28D2Yg/c3ALnZStzNaZMSagpbr96vY6Zjc= github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 
h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prestodb/presto-go-client v0.0.0-20201204133205-8958eb37e584 h1:utxLhK2pYK3qDug5474YP2bbm2dbAPXA+dDrTvrkxeU= github.com/prestodb/presto-go-client v0.0.0-20201204133205-8958eb37e584/go.mod h1:cwaFkElLIrI4vTXo5A1oDobUBFad0aVtZiZvfxJyX6I= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/prom2json v1.3.2 h1:heRKAGHWqm8N3IaRDDNBBJNVS6a9mLdsTlFhvOaNGb0= github.com/prometheus/prom2json v1.3.2/go.mod h1:TQ9o1OxW0eyhl4BBpVpGGsavyJfTDETna4/d0Kib+V0= github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= github.com/relex/aini v1.5.0 h1:6euW/m6b2Y2hkSY8rsyGzcYGpMUWx2dnTzXgQvunTzQ= github.com/relex/aini v1.5.0/go.mod h1:qUMEteDeWDTMHUP7WsaOTc7gawELU5Gcrn2YHz4EAr0= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal 
v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sijms/go-ora/v2 v2.2.15 h1:GJfudyOHT+DUgBPvS2wP/8lgmuer3OKfha8C0xnThW8= github.com/sijms/go-ora/v2 v2.2.15/go.mod h1:jzfAFD+4CXHE+LjGWFl6cPrtiIpQVxakI2gvrMF2w6Y= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/snowflakedb/gosnowflake v1.6.1 h1:gaRt3oK7ATFmLgAg6Gw7aKvWhWts3WV33d0YE4Ofh2U= github.com/snowflakedb/gosnowflake v1.6.1/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5WpGiayY6lFNYb98= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/thda/tds v0.1.7 h1:s29kbnJK0agL3ps85A/sb9XS2uxgKF5UJ6AZjbyqXX4= github.com/thda/tds v0.1.7/go.mod h1:isLIF1oZdXfkqVMJM8RyNrsjlHPlTKnPlnsBs7ngZcM= github.com/tklauser/go-sysconf v0.3.11 
h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/trinodb/trino-go-client v0.300.0 h1:DnTskW//HKm9oggYviFsYIMdLyl7TqlWs4yXhW9NlKE= github.com/trinodb/trino-go-client v0.300.0/go.mod h1:/CMFmXqrFGmn76o/ZGefBLq7X6l0tsTJV4zrgQMVvvM= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/uber-go/tally v3.4.2+incompatible h1:wEKPHq3KIjguuHz/M6SXVjDlUTh+39OtnhlLWsfR7z0= github.com/uber-go/tally v3.4.2+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber/athenadriver v1.1.13 h1:h67yRqRl0Vj5f30bn6Nsn42bI08rY+yielMn1pdUhV0= github.com/uber/athenadriver v1.1.13/go.mod h1:mpa8cVqc/JH/xnViFoWFvNkoYyIj5ohn9mxAnBj3fik= github.com/unchartedsoftware/witch v0.0.0-20200617171400-4f405404126f h1:CfRRJzp88GWCAR3+GrJ/slqrbhCqesDRY/FmT6s4uF8= github.com/unchartedsoftware/witch v0.0.0-20200617171400-4f405404126f/go.mod h1:xN7cr17jYCNJzZO78A4c0BcspGPbSAdFNq1NfDEE2do= github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vertica/vertica-sql-go v1.2.0 h1:9znZNdweNWHarVf0iUEsUjnEihrtaAXkrVmb3JDnsx0= github.com/vertica/vertica-sql-go v1.2.0/go.mod h1:fGr44VWdEvL+f+Qt5LkKLOT7GoxaWdoUCnPBU9h6t04= github.com/vishvananda/netlink v0.0.0-20210530105856-14e832ae1e8f h1:uKAQrhXS4xtXu+lAQ6Rh8rER4nVuUy9km9ayWDCF2x4= github.com/vishvananda/netlink v0.0.0-20210530105856-14e832ae1e8f/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= 
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xinsnake/go-http-digest-auth-client v0.6.0 h1:nrYFWDrB2F7VwYlNravXZS0nOtg9axlATH3Jns55/F0= github.com/xinsnake/go-http-digest-auth-client v0.6.0/go.mod h1:QK1t1v7ylyGb363vGWu+6Irh7gyFj+N7+UZzM0L6g8I= github.com/xo/dburl v0.9.0 h1:ME8QfRqZz/YDwf+VVEe9sq4wgEZCAOdYcUTeuAf+wQQ= github.com/xo/dburl v0.9.0/go.mod h1:7Uupe87dIDxNrbKFRrpw6bAf2l3/rqU42iwlpq1nyjY= github.com/xo/tblfmt v0.8.0 h1:/7DE0kyTdhFfdAK1jEUMDBqy1m78jRMSXGtD48Z4aUA= github.com/xo/tblfmt v0.8.0/go.mod h1:N3SzkwwBiNenc9Ox19XjKyBsKp8hmx2ttKcI1ZMfj2E= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xo/usql v0.9.5 h1:II6gbWtwPQtMNw+wE32zz30wLUk8V9FWW1Km6lt0vgY= github.com/xo/usql v0.9.5/go.mod h1:DORND3Bhs6CglGmbFQdlA5OEw4IINiOKOLCbsiHFbhg= github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ= github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.2 
h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zaf/temp v0.0.0-20170209143821-94e385923345 h1:YirhcaVb0RNq54Vh/50S0MPEbr9b4tjZVXvoeeKoYyc= github.com/zaf/temp v0.0.0-20170209143821-94e385923345/go.mod h1:sXsZgXwh6DB0qlskmZVB4HE93e5YrktMrgUDPy9iYmY= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.mongodb.org/mongo-driver v1.7.4 h1:sllcioag8Mec0LYkftYWq+cKNPIR4Kqq3iv9ZXY0g/E= go.mongodb.org/mongo-driver v1.7.4/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= 
go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= 
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 h1:e7LhZmJ631l59keHP9ssC3sgSn3/oiEHKHKXDkimURY= golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/api v0.203.0 
h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 
h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/gokrb5.v6 v6.1.1 h1:n0KFjpbuM5pFMN38/Ay+Br3l91netGSVqHPHEXeWUqk= gopkg.in/jcmturner/gokrb5.v6 v6.1.1/go.mod h1:NFjHNLrHQiruory+EmqDXCGv6CrjkeYeA+bR9mIfNFk= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/bigquery v1.0.16 h1:KlAgkctWTlliZHxrzS1gCkTmbm0M4p/wOl/I7sbphL8= gorm.io/driver/bigquery v1.0.16/go.mod h1:fXwpNNQBJ5qbgTt5594M9/zI6rC1iZ7hlmpv5whAgKA= honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/b v1.0.2 h1:iPC2u39ebzq12GOC2yXT4mve0HrWcH85cz+midWjzeo= modernc.org/b v1.0.2/go.mod h1:fVGfCIzkZw5RsuF2A2WHbJmY7FiMIq30nP4s52uWsoY= modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= 
modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/db v1.0.3 h1:apxOlWU69je04bY22OT6J0RL23mzvUy22EgTAVyw+Yg= modernc.org/db v1.0.3/go.mod h1:L4ltUg8tu2pkSJk+fKaRrXs/3EdW79ZKYQ5PfVDT53U= modernc.org/file v1.0.3 h1:McYGAMMuqjRp6ptmpcLr3r5yw3gNPsonFCAJ0tNK74U= modernc.org/file v1.0.3/go.mod h1:CNj/pwOfCtCbqiHcXDUlHBB2vWrzdaDCWdcnjtS1+XY= modernc.org/fileutil v1.0.0 h1:Z1AFLZwl6BO8A5NldQg/xTSjGLetp+1Ubvl4alfGx8w= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM= modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= modernc.org/internal v1.0.2 h1:Sn3+ojjMRnPaOR6jFISs6KAdRHnR4q9KNuwfKINKmZA= modernc.org/internal v1.0.2/go.mod h1:bycJAcev709ZU/47nil584PeBD+kbu8nv61ozeMso9E= modernc.org/libc v1.22.4 h1:wymSbZb0AlrjdAVX3cjreCHTPCpPARbQXNz6BHPzdwQ= modernc.org/libc v1.22.4/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= modernc.org/lldb v1.0.2 h1:LBw58xVFl01OuM5U9++tLy3wmu+PoWok6T3dHuNjcZk= modernc.org/lldb v1.0.2/go.mod h1:ovbKqyzA9H/iPwHkAOH0qJbIQVT9rlijecenxDwVUi0= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/ql v1.4.0 h1:CqLAho+y4N8JwvqT7NJsYsp7YPwiRv6RE2n0n1ksSCU= modernc.org/ql v1.4.0/go.mod h1:q4c29Bgdx+iAtxx47ODW5Xo2X0PDkjSCK9NdQl6KFxc= modernc.org/sortutil v1.1.0 
h1:oP3U4uM+NT/qBQcbg/K2iqAX0Nx7B1b6YZtq3Gk/PjM= modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= modernc.org/sqlite v1.21.2 h1:ixuUG0QS413Vfzyx6FWx6PYTmHaOegTY+hjzhn7L+a0= modernc.org/sqlite v1.21.2/go.mod h1:cxbLkB5WS32DnQqeH4h4o1B0eMr8W/y8/RGuxQ3JsC0= modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/zappy v1.0.3 h1:Tr+P3kclDSrvC6zYBW2hWmOmu5SjG6PtvCt3RCjRmss= modernc.org/zappy v1.0.3/go.mod h1:w/Akq8ipfols/xZJdR5IYiQNOqC80qz2mVvsEwEbkiI= software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE= software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ= sqlflow.org/gohive v0.0.0-20200521083454-ed52ee669b84 h1:6Q27ES8FgvwwQssYYqyR7rFLisaDA1Iqqk9TSWpTHxw= sqlflow.org/gohive v0.0.0-20200521083454-ed52ee669b84/go.mod h1:IudT38uGbK5q+Ztx2AsZDzjc04yBsIOUtIDMh/WSJkk= sqlflow.org/gomaxcompute v0.0.0-20210805062559-c14ae028b44c h1:Zo3qlfUn/rlMx9vWHpGE/luEtweuXHwrYbrFZwTG978= sqlflow.org/gomaxcompute v0.0.0-20210805062559-c14ae028b44c/go.mod h1:MxRFJp6UEk1OfnnVOIL3Jc7ROBH0dOpwF/J14A9LNdM= tiup-1.16.3/install.sh000077500000000000000000000052031505422223000145630ustar00rootroot00000000000000#!/bin/sh repo='https://tiup-mirrors.pingcap.com' if [ -n "$TIUP_MIRRORS" ]; then repo=$TIUP_MIRRORS fi case $(uname -s) in Linux|linux) os=linux ;; Darwin|darwin) os=darwin ;; *) os= ;; esac if [ -z "$os" ]; then echo "OS $(uname -s) not supported." >&2 exit 1 fi case $(uname -m) in amd64|x86_64) arch=amd64 ;; arm64|aarch64) arch=arm64 ;; *) arch= ;; esac if [ -z "$arch" ]; then echo "Architecture $(uname -m) not supported." 
>&2 exit 1 fi if [ -z "$TIUP_HOME" ]; then TIUP_HOME=$HOME/.tiup fi bin_dir=$TIUP_HOME/bin mkdir -p "$bin_dir" install_binary() { curl "$repo/tiup-$os-$arch.tar.gz?$(date "+%Y%m%d%H%M%S")" -o "/tmp/tiup-$os-$arch.tar.gz" || return 1 tar -zxf "/tmp/tiup-$os-$arch.tar.gz" -C "$bin_dir" || return 1 rm "/tmp/tiup-$os-$arch.tar.gz" return 0 } check_depends() { pass=0 command -v curl >/dev/null || { echo "Dependency check failed: please install 'curl' before proceeding." pass=1 } command -v tar >/dev/null || { echo "Dependency check failed: please install 'tar' before proceeding." pass=1 } return $pass } if ! check_depends; then exit 1 fi if ! install_binary; then echo "Failed to download and/or extract tiup archive." exit 1 fi chmod 755 "$bin_dir/tiup" "$bin_dir/tiup" mirror set $repo --silent bold=$(tput bold 2>/dev/null) green=$(tput setaf 2 2>/dev/null) cyan=$(tput setaf 6 2>/dev/null) sgr0=$(tput sgr0 2>/dev/null) echo # Reference: https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix shell=$(echo $SHELL | awk 'BEGIN {FS="/";} { print $NF }') echo "Detected shell: ${bold}$shell${sgr0}" if [ -f "${HOME}/.${shell}_profile" ]; then PROFILE=${HOME}/.${shell}_profile elif [ -f "${HOME}/.${shell}_login" ]; then PROFILE=${HOME}/.${shell}_login elif [ -f "${HOME}/.${shell}rc" ]; then PROFILE=${HOME}/.${shell}rc else PROFILE=${HOME}/.profile fi echo "Shell profile: ${bold}$PROFILE${sgr0}" echo echo "${bold}${green}✔ ${sgr0}Installed in ${bold}$bin_dir/tiup${sgr0}" case :$PATH: in *:$bin_dir:*) echo "${bold}${green}✔ ${sgr0}tiup PATH is already set, skip" ;; *) printf '\nexport PATH=%s:$PATH\n' "$bin_dir" >> "$PROFILE" echo "${bold}${green}✔ ${sgr0}Added tiup PATH into ${bold}${shell}${sgr0} profile" ;; esac echo echo "${bold}tiup is installed now${sgr0} 🎉" echo echo Next step: echo echo " 1: To make PATH change effective, restart your shell or execute:" echo " ${bold}${cyan}source ${PROFILE}${sgr0}" echo echo " 2: Start a local TiDB for 
development:" echo " ${bold}${cyan}tiup playground${sgr0}" tiup-1.16.3/main.go000066400000000000000000000011331505422223000140270ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "github.com/pingcap/tiup/cmd" ) func main() { cmd.Execute() } tiup-1.16.3/main_test.go000066400000000000000000000020561505422223000150730ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "os" "strings" "testing" ) // To build: // see build_tiup_test in Makefile // To run: // tiup.test -test.coverprofile={file} __DEVEL--i-heard-you-like-tests func TestMain(t *testing.T) { var ( args []string run bool ) for _, arg := range os.Args { switch { case arg == "__DEVEL--i-heard-you-like-tests": run = true case strings.HasPrefix(arg, "-test"): case strings.HasPrefix(arg, "__DEVEL"): default: args = append(args, arg) } } os.Args = args // fmt.Println(os.Args) if run { main() } } tiup-1.16.3/pkg/000077500000000000000000000000001505422223000133375ustar00rootroot00000000000000tiup-1.16.3/pkg/base52/000077500000000000000000000000001505422223000144205ustar00rootroot00000000000000tiup-1.16.3/pkg/base52/base52.go000066400000000000000000000030071505422223000160300ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package base52 import ( "fmt" "strings" ) const ( space = "0123456789bcdfghjkmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ" base = len(space) ) // Encode returns a string by encoding the id over a 51 characters space func Encode(id int64) string { var short []byte for id > 0 { i := id % int64(base) short = append(short, space[i]) id /= int64(base) } for i, j := 0, len(short)-1; i < j; i, j = i+1, j-1 { short[i], short[j] = short[j], short[i] } return string(short) } // Decode will decode the string and return the id // The input string should be a valid one with only characters in the space func Decode(encoded string) (int64, error) { if len(encoded) != len([]rune(encoded)) { return 0, fmt.Errorf("invalid encoded string: '%s'", encoded) } var id int64 for i := 0; i < len(encoded); i++ { idx := strings.IndexByte(space, encoded[i]) if idx < 0 { return 0, fmt.Errorf("invalid encoded string: '%s' contains invalid character", encoded) } id = id*int64(base) + int64(idx) } return id, nil } tiup-1.16.3/pkg/base52/base52_test.go000066400000000000000000000016251505422223000170730ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package base52 import ( "testing" "github.com/stretchr/testify/require" ) func TestEncode(t *testing.T) { require.Equal(t, "2TPzw7", Encode(1000000000)) } func TestDecode(t *testing.T) { decoded, err := Decode("2TPzw7") require.Equal(t, int64(1000000000), decoded) require.Nil(t, err) decoded, err = Decode("../../etc/passwd") require.Equal(t, int64(0), decoded) require.NotNil(t, err) } tiup-1.16.3/pkg/checkpoint/000077500000000000000000000000001505422223000154665ustar00rootroot00000000000000tiup-1.16.3/pkg/checkpoint/checkpoint.go000066400000000000000000000135301505422223000201460ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package checkpoint import ( "bufio" "context" "encoding/json" "io" "os" "runtime" "strings" "sync/atomic" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/version" "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/sync/semaphore" ) type contextKey string const ( semKey = contextKey("CHECKPOINT_SEMAPHORE") goroutineKey = contextKey("CHECKPOINT_GOROUTINE") funcKey = "__func__" hashKey = "__hash__" // At most 10M for each line in audit log maxTokenSize = 10 * 1024 * 1024 ) var ( checkpoint *CheckPoint // DebugCheckpoint is a switch used to debug if: // - The context passed to checkpoint is generated by checkpoint.NewContext // - multilple context acquire applied to the same context belone to the sampe goroutine DebugCheckpoint = os.Getenv("DEBUG_CHECKPOINT") == "1" ) // SetCheckPoint set global checkpoint for replay func SetCheckPoint(file string) error { pointReader, err := os.Open(file) if err != nil { return errors.AddStack(err) } defer pointReader.Close() checkpoint, err = NewCheckPoint(pointReader) if err != nil { return err } return nil } // HasCheckPoint returns if SetCheckPoint has been called func HasCheckPoint() bool { return checkpoint != nil } // Acquire wraps CheckPoint.Acquire func Acquire(ctx context.Context, fs FieldSet, point map[string]any) *Point { if ctx.Value(goroutineKey) == nil || ctx.Value(semKey) == nil { if DebugCheckpoint { panic("the context passed to checkpoint.Acquire is not generated by checkpoint.NewContext") } zap.L().Debug("context missing for checkpoint, the result of replaying this operation may be unexpected!") ctx = NewContext(ctx) } // Check goroutine if we are in test gptr := ctx.Value(goroutineKey).(*goroutineLock) g := atomic.LoadUint64((*uint64)(gptr)) if g == 0 { atomic.StoreUint64((*uint64)(gptr), uint64(newGoroutineLock())) } else { goroutineLock(g).check() } pc, _, _, _ := runtime.Caller(1) fn := runtime.FuncForPC(pc).Name() // If checkpoint is disabled, return a mock point if checkpoint == nil { return 
&Point{nil, fn, nil, true} } return checkpoint.acquire(ctx, fs, fn, point) } // NewContext wraps given context with value needed by checkpoint func NewContext(ctx context.Context) context.Context { switch { case ctx.Value(semKey) == nil: ctx = context.WithValue(ctx, semKey, semaphore.NewWeighted(1)) case ctx.Value(semKey).(*semaphore.Weighted).TryAcquire(1): defer ctx.Value(semKey).(*semaphore.Weighted).Release(1) ctx = context.WithValue(ctx, semKey, semaphore.NewWeighted(1)) default: ctx = context.WithValue(ctx, semKey, semaphore.NewWeighted(0)) } return context.WithValue(ctx, goroutineKey, new(goroutineLock)) } // CheckPoint provides the ability to recover from a failed command at the failpoint type CheckPoint struct { points []map[string]any } // NewCheckPoint returns a CheckPoint by given audit file func NewCheckPoint(r io.Reader) (*CheckPoint, error) { cp := CheckPoint{points: make([]map[string]any, 0)} scanner := bufio.NewScanner(r) scanner.Buffer(nil, maxTokenSize) for scanner.Scan() { line := scanner.Text() m, err := checkLine(line) if err != nil { return nil, errors.Annotate(err, "initial checkpoint failed") } if m == nil { continue } cp.points = append(cp.points, m) } if err := scanner.Err(); err != nil { return nil, errors.Annotate(err, "failed to parse audit file %s") } return &cp, nil } // Acquire get point from checkpoints func (c *CheckPoint) acquire(ctx context.Context, fs FieldSet, fn string, point map[string]any) *Point { acquired := ctx.Value(semKey).(*semaphore.Weighted).TryAcquire(1) point[funcKey] = fn point[hashKey] = version.GitHash next_point: for _, p := range c.points { for _, cf := range fs.Slice() { if cf.eq == nil { continue } if !contains(p, cf.field) || !contains(point, cf.field) || !cf.eq(p[cf.field], point[cf.field]) { continue next_point } } return &Point{ctx, fn, p, acquired} } return &Point{ctx, fn, nil, acquired} } // Point is a point of checkpoint type Point struct { ctx context.Context fn string point map[string]any acquired 
bool } // Hit returns value of the point, it will be nil if not hit. func (p *Point) Hit() map[string]any { return p.point } // Release write checkpoint into log file func (p *Point) Release(err error, fields ...zapcore.Field) { logfn := zap.L().Info if err != nil { logfn = zap.L().Error fields = append(fields, zap.Error(err)) } fields = append(fields, zap.String(hashKey, version.GitHash), zap.String(funcKey, p.fn), zap.Bool("hit", p.Hit() != nil)) if p.acquired { logfn("CheckPoint", fields...) // If checkpoint is disabled, the p.ctx will be nil if p.ctx != nil { p.ctx.Value(semKey).(*semaphore.Weighted).Release(1) } } } func checkLine(line string) (map[string]any, error) { // target log format: // 2021-01-13T14:11:02.987+0800 INFO SCPCommand {k:v...} // 2021-01-13T14:11:03.780+0800 INFO SSHCommand {k:v...} ss := strings.Fields(line) pos := strings.Index(line, "{") if len(ss) < 4 || ss[1] != "INFO" || ss[2] != "CheckPoint" || pos == -1 { return nil, nil } m := make(map[string]any) if err := json.Unmarshal([]byte(line[pos:]), &m); err != nil { return nil, errors.AddStack(err) } return m, nil } func contains(m map[string]any, f string) bool { _, ok := m[f] return ok } tiup-1.16.3/pkg/checkpoint/checkpoint_test.go000066400000000000000000000151551505422223000212120ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package checkpoint import ( "context" "fmt" "reflect" "strings" "testing" "github.com/stretchr/testify/require" ) var ( sshCmd FieldSet scpCmd FieldSet ) func setup() { DebugCheckpoint = true // register checkpoint for ssh command sshCmd = Register( Field("host", reflect.DeepEqual), Field("port", func(a, b any) bool { return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", b) }), Field("user", reflect.DeepEqual), Field("sudo", reflect.DeepEqual), Field("cmd", reflect.DeepEqual), ) // register checkpoint for scp command scpCmd = Register( Field("host", reflect.DeepEqual), Field("port", func(a, b any) bool { return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", b) }), Field("user", reflect.DeepEqual), Field("src", reflect.DeepEqual), Field("dst", reflect.DeepEqual), Field("download", reflect.DeepEqual), ) } func TestCheckPointSimple(t *testing.T) { setup() assert := require.New(t) r := strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "22", "sudo": false, "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": "", "__func__": "test", "__hash__": "Unknown"}`) c, err := NewCheckPoint(r) assert.Nil(err) ctx := NewContext(context.Background()) p := c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "cmd": "test cmd", "sudo": false, }) assert.NotNil(p.Hit()) assert.Equal(p.Hit()["stdout"], "success") assert.True(p.acquired) p1 := c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.139", }) assert.Nil(p1.Hit()) assert.False(p1.acquired) p1.Release(nil) p2 := c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.138", }) assert.Nil(p2.Hit()) assert.False(p2.acquired) p1.Release(nil) p.Release(nil) p3 := c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.137", }) assert.Nil(p3.Hit()) assert.True(p3.acquired) p3.Release(nil) } func TestCheckPointMultiple(t *testing.T) { setup() assert := require.New(t) r := strings.NewReader(` 
2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "22", "user": "tidb", "cmd": "test cmd", "sudo": false, "stdout": "success", "stderr": "", "__func__": "test", "__hash__": "Unknown"} 2021-01-14T12:17:32.222+0800 DEBUG Environment variables {"env": ["TIUP_HOME=/home/tidb/.tiup"} 2021-01-14T12:17:33.579+0800 INFO Execute command {"command": "tiup cluster deploy test v4.0.9 /Users/joshua/test.yaml"} 2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.141", "port": "22", "user": "tidb", "src": "src", "dst": "dst", "download": false, "__func__": "test", "__hash__": "Unknown"} `) c, err := NewCheckPoint(r) assert.Nil(err) ctx := NewContext(context.Background()) p := c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "sudo": false, "cmd": "test cmd", }) assert.NotNil(p.Hit()) assert.Equal(p.Hit()["stdout"], "success") p.Release(nil) p = c.acquire(ctx, scpCmd, "test", map[string]any{ "host": "172.16.5.141", "port": 22, "user": "tidb", "src": "src", "dst": "dst", "download": false, }) assert.NotNil(p.Hit()) p.Release(nil) } func TestCheckPointNil(t *testing.T) { setup() assert := require.New(t) // With wrong log level r := strings.NewReader(`2021-01-14T12:16:54.579+0800 ERROR CheckPoint {"host": "172.16.5.140", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`) c, err := NewCheckPoint(r) assert.Nil(err) ctx := NewContext(context.Background()) p := c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "cmd": "test cmd", }) assert.Nil(p.Hit()) p.Release(nil) // With wrong log title r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO XXXCommand {"host": "172.16.5.140", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`) c, err = NewCheckPoint(r) assert.Nil(err) p = c.acquire(ctx, scpCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "cmd": 
"test cmd", }) assert.Nil(p.Hit()) p.Release(nil) // With wrong log host r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.141", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`) c, err = NewCheckPoint(r) assert.Nil(err) p = c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "cmd": "test cmd", }) assert.Nil(p.Hit()) p.Release(nil) // With wrong port r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "23", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`) c, err = NewCheckPoint(r) assert.Nil(err) p = c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "cmd": "test cmd", }) assert.Nil(p.Hit()) p.Release(nil) // With wrong user r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "22", "user": "yidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`) c, err = NewCheckPoint(r) assert.Nil(err) p = c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "cmd": "test cmd", }) assert.Nil(p.Hit()) p.Release(nil) // With wrong cmd r = strings.NewReader(`2021-01-14T12:16:54.579+0800 INFO CheckPoint {"host": "172.16.5.140", "port": "22", "user": "tidb", "cmd": "test cmd", "stdout": "success", "stderr": ""}`) c, err = NewCheckPoint(r) assert.Nil(err) p = c.acquire(ctx, sshCmd, "test", map[string]any{ "host": "172.16.5.140", "port": 22, "user": "tidb", "cmd": "test cmd", }) assert.Nil(p.Hit()) assert.True(p.acquired) p.Release(nil) } func TestCheckPointNotInited(t *testing.T) { setup() assert := require.New(t) assert.Panics(func() { Acquire(context.Background(), sshCmd, map[string]any{}) }) } tiup-1.16.3/pkg/checkpoint/field.go000066400000000000000000000031221505422223000170760ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package checkpoint import ( "reflect" "github.com/pingcap/tiup/pkg/set" ) // FieldSet is an ordered set stores fields and comparators, it's []CheckField type FieldSet struct { set.AnySet } // Slice returns CheckField slice for iteration func (fs *FieldSet) Slice() []CheckField { slice := []CheckField{} for _, f := range fs.AnySet.Slice() { slice = append(slice, f.(CheckField)) } return slice } // CheckField is a field that should be checked type CheckField struct { field string eq func(any, any) bool } // Field returns new CheckField func Field(name string, eq func(any, any) bool) CheckField { return CheckField{name, eq} } // Register register FieldSet to global for latter comparing // If there are two fields with the same name, the first one will // be used func Register(fields ...CheckField) FieldSet { s := set.NewAnySet(func(a, b any) bool { return a.(CheckField).field == b.(CheckField).field }) fields = append(fields, Field(hashKey, reflect.DeepEqual), Field(funcKey, reflect.DeepEqual)) for _, f := range fields { s.Insert(f) } return FieldSet{*s} } tiup-1.16.3/pkg/checkpoint/gotrack.go000066400000000000000000000065631505422223000174610ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. // Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Defensive debug-only utility to track that functions run on the // goroutine that they're supposed to. package checkpoint import ( "bytes" "errors" "fmt" "runtime" "strconv" "sync" ) type goroutineLock uint64 func newGoroutineLock() goroutineLock { if !DebugCheckpoint { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { if !DebugCheckpoint { return } if curGoroutineID() != uint64(g) { panic("running on the wrong goroutine") } } var goroutineSpace = []byte("goroutine ") func curGoroutineID() uint64 { bp := littleBuf.Get().(*[]byte) defer littleBuf.Put(bp) b := *bp b = b[:runtime.Stack(b, false)] // Parse the 4707 out of "goroutine 4707 [" b = bytes.TrimPrefix(b, goroutineSpace) i := bytes.IndexByte(b, ' ') if i < 0 { panic(fmt.Sprintf("No space found in %q", b)) } b = b[:i] n, err := parseUintBytes(b, 10, 64) if err != nil { panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) } return n } var littleBuf = sync.Pool{ New: func() any { buf := make([]byte, 64) return &buf }, } // parseUintBytes is like strconv.ParseUint, but using a []byte. func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { var cutoff, maxVal uint64 if bitSize == 0 { bitSize = int(strconv.IntSize) } s0 := s switch { case len(s) < 1: err = strconv.ErrSyntax goto Error case 2 <= base && base <= 36: // valid base; nothing to do case base == 0: // Look for octal, hex prefix. 
switch { case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): base = 16 s = s[2:] if len(s) < 1 { err = strconv.ErrSyntax goto Error } case s[0] == '0': base = 8 default: base = 10 } default: err = errors.New("invalid base " + strconv.Itoa(base)) goto Error } n = 0 cutoff = cutoff64(base) maxVal = 1<= base { n = 0 err = strconv.ErrSyntax goto Error } if n >= cutoff { // n*base overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n *= uint64(base) n1 := n + uint64(v) if n1 < n || n1 > maxVal { // n+v overflows n = 1<<64 - 1 err = strconv.ErrRange goto Error } n = n1 } return n, nil Error: return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} } // Return the first number n such that n*base >= 1<<64. func cutoff64(base int) uint64 { if base < 2 { return 0 } return (1<<64-1)/uint64(base) + 1 } tiup-1.16.3/pkg/cluster/000077500000000000000000000000001505422223000150205ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/ansible/000077500000000000000000000000001505422223000164355ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/ansible/config.go000066400000000000000000000106451505422223000202370ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package ansible import ( "context" "fmt" "path/filepath" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/tui" ) // ImportConfig copies config files from cluster which deployed through tidb-ansible func ImportConfig(ctx context.Context, name string, clsMeta *spec.ClusterMeta, gOpt operator.Options) error { // there may be already cluster dir, skip create // if err := utils.MkdirAll(meta.ClusterPath(name), 0755); err != nil { // return err // } // if err := utils.WriteFile(meta.ClusterPath(name, "topology.yaml"), yamlFile, 0664); err != nil { // return err // } logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) var sshProxyProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} if gOpt.SSHType != executor.SSHTypeNone && len(gOpt.SSHProxyHost) != 0 { var err error if sshProxyProps, err = tui.ReadIdentityFileOrPassword(gOpt.SSHProxyIdentity, gOpt.SSHProxyUsePassword); err != nil { return err } } var copyFileTasks []task.Task for _, comp := range clsMeta.Topology.ComponentsByStartOrder() { logger.Infof("Copying config file(s) of %s...", comp.Name()) for _, inst := range comp.Instances() { switch inst.ComponentName() { case spec.ComponentPD, spec.ComponentTiKV, spec.ComponentPump, spec.ComponentTiDB, spec.ComponentDrainer: t := task.NewBuilder(logger). SSHKeySet( spec.ClusterPath(name, "ssh", "id_rsa"), spec.ClusterPath(name, "ssh", "id_rsa.pub")). UserSSH( inst.GetManageHost(), inst.GetSSHPort(), clsMeta.User, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, sshProxyProps.Password, sshProxyProps.IdentityFile, sshProxyProps.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, "", ). 
CopyFile(filepath.Join(inst.DeployDir(), "conf", inst.ComponentName()+".toml"), spec.ClusterPath(name, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", inst.ComponentName(), inst.GetHost(), inst.GetPort())), inst.GetManageHost(), true, 0, false). Build() copyFileTasks = append(copyFileTasks, t) case spec.ComponentTiFlash: t := task.NewBuilder(logger). SSHKeySet( spec.ClusterPath(name, "ssh", "id_rsa"), spec.ClusterPath(name, "ssh", "id_rsa.pub")). UserSSH( inst.GetManageHost(), inst.GetSSHPort(), clsMeta.User, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, sshProxyProps.Password, sshProxyProps.IdentityFile, sshProxyProps.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, "", ). CopyFile(filepath.Join(inst.DeployDir(), "conf", inst.ComponentName()+".toml"), spec.ClusterPath(name, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", inst.ComponentName(), inst.GetHost(), inst.GetPort())), inst.GetManageHost(), true, 0, false). CopyFile(filepath.Join(inst.DeployDir(), "conf", inst.ComponentName()+"-learner.toml"), spec.ClusterPath(name, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-learner-%s-%d.toml", inst.ComponentName(), inst.GetHost(), inst.GetPort())), inst.GetManageHost(), true, 0, false). Build() copyFileTasks = append(copyFileTasks, t) } } } t := task.NewBuilder(logger). Parallel(false, copyFileTasks...). Build() if err := t.Execute(ctx); err != nil { return errors.Trace(err) } logger.Infof("Finished copying configs.") return nil } tiup-1.16.3/pkg/cluster/ansible/import.go000066400000000000000000000232141505422223000203000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ansible import ( "context" "fmt" "io" "io/fs" "os" "path/filepath" "reflect" "strconv" "strings" "github.com/BurntSushi/toml" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" "github.com/relex/aini" ) // ReadInventory reads the inventory files of a TiDB cluster deployed by TiDB-Ansible func ReadInventory(ctx context.Context, dir, inventoryFileName string) (string, *spec.ClusterMeta, *aini.InventoryData, error) { logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) if inventoryFileName == "" { inventoryFileName = AnsibleInventoryFile } inventoryFile, err := os.Open(filepath.Join(dir, inventoryFileName)) if err != nil { return "", nil, nil, err } defer inventoryFile.Close() logger.Infof("Found inventory file %s, parsing...", inventoryFile.Name()) clsName, clsMeta, inventory, err := parseInventoryFile(inventoryFile) if err != nil { return "", nil, inventory, err } logger.Infof("Found cluster \"%s\" (%s), deployed with user %s.", clsName, clsMeta.Version, clsMeta.User) return clsName, clsMeta, inventory, err } func parseInventoryFile(invFile io.Reader) (string, *spec.ClusterMeta, *aini.InventoryData, error) { inventory, err := aini.Parse(invFile) if err != nil { return "", nil, inventory, err } clsMeta := &spec.ClusterMeta{ Topology: &spec.Specification{ GlobalOptions: spec.GlobalOptions{ Arch: "amd64", }, MonitoredOptions: spec.MonitoredOptions{}, TiDBServers: make([]*spec.TiDBSpec, 0), TiKVServers: make([]*spec.TiKVSpec, 0), PDServers: make([]*spec.PDSpec, 0), 
TiFlashServers: make([]*spec.TiFlashSpec, 0), PumpServers: make([]*spec.PumpSpec, 0), Drainers: make([]*spec.DrainerSpec, 0), Monitors: make([]*spec.PrometheusSpec, 0), Grafanas: make([]*spec.GrafanaSpec, 0), Alertmanagers: make([]*spec.AlertmanagerSpec, 0), }, } clsName := "" // get global vars grp, ok := inventory.Groups["all"] if !ok || len(grp.Hosts) == 0 { return "", nil, inventory, errors.New("no available host in the inventory file") } // set global variables clsName = grp.Vars["cluster_name"] clsMeta.User = grp.Vars["ansible_user"] clsMeta.Topology.GlobalOptions.User = clsMeta.User clsMeta.Version = grp.Vars["tidb_version"] clsMeta.Topology.GlobalOptions.DeployDir = grp.Vars["deploy_dir"] // deploy_dir and data_dir of monitored need to be set, otherwise they will be // subdirs of deploy_dir in global options allSame := uniqueVar("deploy_dir", inventory.Groups["monitored_servers"].Hosts) if len(allSame) == 1 { clsMeta.Topology.MonitoredOptions.DeployDir = allSame[0] clsMeta.Topology.MonitoredOptions.DataDir = filepath.Join( clsMeta.Topology.MonitoredOptions.DeployDir, "data", ) } else { clsMeta.Topology.MonitoredOptions.DeployDir = clsMeta.Topology.GlobalOptions.DeployDir clsMeta.Topology.MonitoredOptions.DataDir = filepath.Join( clsMeta.Topology.MonitoredOptions.DeployDir, "data", ) } if grp.Vars["process_supervision"] != "systemd" { return "", nil, inventory, errors.New("only support cluster deployed with systemd") } if enableBinlog, err := strconv.ParseBool(grp.Vars["enable_binlog"]); err == nil && enableBinlog { if clsMeta.Topology.ServerConfigs.TiDB == nil { clsMeta.Topology.ServerConfigs.TiDB = make(map[string]any) } clsMeta.Topology.ServerConfigs.TiDB["binlog.enable"] = enableBinlog } return clsName, clsMeta, inventory, err } // SSHKeyPath gets the path to default SSH private key, this is the key Ansible // uses to connect deployment servers func SSHKeyPath() string { homeDir, err := os.UserHomeDir() if err != nil { return "" } return 
fmt.Sprintf("%s/.ssh/id_rsa", homeDir) } func uniqueVar(key string, hosts map[string]*aini.Host) []string { vars := set.NewStringSet() for _, h := range hosts { vars.Insert(h.Vars[key]) } return vars.Slice() } // parse config files func parseConfigFile(cfgfile string) (map[string]any, error) { srvConfigs := make(map[string]any) if _, err := toml.DecodeFile(cfgfile, &srvConfigs); err != nil { return nil, errors.Annotate(err, "decode toml file") } return spec.FlattenMap(srvConfigs), nil } func diffConfigs(configs []map[string]any) (global map[string]any, locals []map[string]any) { global = make(map[string]any) keySet := set.NewStringSet() // parse all configs from file for _, config := range configs { locals = append(locals, config) for k := range config { keySet.Insert(k) } } // summary global config for k := range keySet { valSet := set.NewAnySet(reflect.DeepEqual) for _, config := range locals { valSet.Insert(config[k]) } if len(valSet.Slice()) > 1 { // this key can't be put into global continue } global[k] = valSet.Slice()[0] } // delete global config from local for _, config := range locals { for k := range global { delete(config, k) } } return } // CommentConfig add `#` to the head of each lines for imported configs func CommentConfig(clsName string) error { dir := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath) err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { if err != nil || info.IsDir() || !strings.HasSuffix(info.Name(), ".toml") { return nil } content, err := os.ReadFile(path) if err != nil { return errors.Annotatef(err, "read config file %s", path) } lines := strings.Split(string(content), "\n") for idx := range lines { lines[idx] = "# " + lines[idx] } if err := utils.WriteFile(path, []byte(strings.Join(lines, "\n")), 0644); err != nil { return errors.Annotatef(err, "write config file %s", path) } return nil }) return errors.Annotate(err, "comment imported config") } // LoadConfig files to clusterMeta, include 
tidbservers, tikvservers, pdservers pumpservers and drainerservers func LoadConfig(clsName string, cls *spec.ClusterMeta) error { // deal with tidb config configs := []map[string]any{} for _, srv := range cls.Topology.TiDBServers { prefixkey := spec.ComponentTiDB fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) config, err := parseConfigFile(fname) if err != nil { return err } configs = append(configs, config) } global, locals := diffConfigs(configs) cls.Topology.ServerConfigs.TiDB = spec.MergeConfig(cls.Topology.ServerConfigs.TiDB, global) for i, local := range locals { cls.Topology.TiDBServers[i].Config = spec.MergeConfig(cls.Topology.TiDBServers[i].Config, local) } // deal with tikv config configs = []map[string]any{} for _, srv := range cls.Topology.TiKVServers { prefixkey := spec.ComponentTiKV fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) config, err := parseConfigFile(fname) if err != nil { return err } configs = append(configs, config) } global, locals = diffConfigs(configs) cls.Topology.ServerConfigs.TiKV = spec.MergeConfig(cls.Topology.ServerConfigs.TiKV, global) for i, local := range locals { cls.Topology.TiKVServers[i].Config = spec.MergeConfig(cls.Topology.TiKVServers[i].Config, local) } // deal with pd config configs = []map[string]any{} for _, srv := range cls.Topology.PDServers { prefixkey := spec.ComponentPD fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.ClientPort)) config, err := parseConfigFile(fname) if err != nil { return err } configs = append(configs, config) } global, locals = diffConfigs(configs) cls.Topology.ServerConfigs.PD = spec.MergeConfig(cls.Topology.ServerConfigs.PD, global) for i, local := range locals { cls.Topology.PDServers[i].Config = spec.MergeConfig(cls.Topology.PDServers[i].Config, local) } // 
deal with pump config configs = []map[string]any{} for _, srv := range cls.Topology.PumpServers { prefixkey := spec.ComponentPump fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) config, err := parseConfigFile(fname) if err != nil { return err } configs = append(configs, config) } global, locals = diffConfigs(configs) cls.Topology.ServerConfigs.Pump = spec.MergeConfig(cls.Topology.ServerConfigs.Pump, global) for i, local := range locals { cls.Topology.PumpServers[i].Config = spec.MergeConfig(cls.Topology.PumpServers[i].Config, local) } // deal with drainer config configs = []map[string]any{} for _, srv := range cls.Topology.Drainers { prefixkey := spec.ComponentDrainer fname := spec.ClusterPath(clsName, spec.AnsibleImportedConfigPath, fmt.Sprintf("%s-%s-%d.toml", prefixkey, srv.Host, srv.Port)) config, err := parseConfigFile(fname) if err != nil { return err } configs = append(configs, config) } global, locals = diffConfigs(configs) cls.Topology.ServerConfigs.Drainer = spec.MergeConfig(cls.Topology.ServerConfigs.Drainer, global) for i, local := range locals { cls.Topology.Drainers[i].Config = spec.MergeConfig(cls.Topology.Drainers[i].Config, local) } return nil } tiup-1.16.3/pkg/cluster/ansible/import_test.go000066400000000000000000000160011505422223000213330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package ansible import ( "context" "fmt" "os" "path/filepath" "sort" "strings" "testing" "github.com/creasty/defaults" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestMonitoredDeployDir(t *testing.T) { r := strings.NewReader(` [monitored_servers] 172.16.10.1 172.16.10.2 172.16.10.3 [all:vars] process_supervision = systemd `) _, clsMeta, _, err := parseInventoryFile(r) require.NoError(t, err) require.Equal(t, "", clsMeta.Topology.MonitoredOptions.DeployDir) r = strings.NewReader(` [monitored_servers] 172.16.10.1 172.16.10.2 172.16.10.3 [all:vars] deploy_dir = /data1/deploy process_supervision = systemd `) _, clsMeta, _, err = parseInventoryFile(r) require.NoError(t, err) require.Equal(t, "/data1/deploy", clsMeta.Topology.MonitoredOptions.DeployDir) r = strings.NewReader(` [monitored_servers] 172.16.10.1 deploy_dir=/data/deploy 172.16.10.2 deploy_dir=/data/deploy 172.16.10.3 deploy_dir=/data/deploy [all:vars] deploy_dir = /data1/deploy process_supervision = systemd `) _, clsMeta, _, err = parseInventoryFile(r) require.NoError(t, err) require.Equal(t, "/data/deploy", clsMeta.Topology.MonitoredOptions.DeployDir) r = strings.NewReader(` [monitored_servers] 172.16.10.1 deploy_dir=/data/deploy1 172.16.10.2 deploy_dir=/data/deploy2 172.16.10.3 deploy_dir=/data/deploy3 [all:vars] deploy_dir = /data1/deploy process_supervision = systemd `) _, clsMeta, _, err = parseInventoryFile(r) require.NoError(t, err) require.Equal(t, "/data1/deploy", clsMeta.Topology.MonitoredOptions.DeployDir) } func TestParseInventoryFile(t *testing.T) { dir := "test-data" invData, err := os.Open(filepath.Join(dir, "inventory.ini")) require.NoError(t, err) clsName, clsMeta, inv, err := parseInventoryFile(invData) require.NoError(t, err) require.NotNil(t, inv) require.Equal(t, "ansible-cluster", clsName) require.NotNil(t, clsMeta) require.Equal(t, "v3.0.12", clsMeta.Version) 
require.Equal(t, "tiops", clsMeta.User) expected := []byte(`global: user: tiops deploy_dir: /home/tiopsimport/ansible-deploy arch: amd64 monitored: deploy_dir: /home/tiopsimport/ansible-deploy data_dir: /home/tiopsimport/ansible-deploy/data server_configs: tidb: binlog.enable: true tikv: {} pd: {} tso: {} scheduling: {} tidb_dashboard: {} tiflash: {} tiproxy: {} tiflash-learner: {} pump: {} drainer: {} cdc: {} kvcdc: {} grafana: {} tidb_servers: [] tikv_servers: [] tiflash_servers: [] tiproxy_servers: [] pd_servers: [] monitoring_servers: [] `) topo, err := yaml.Marshal(clsMeta.Topology) require.NoError(t, err) require.Equal(t, string(expected), string(topo)) } func TestParseGroupVars(t *testing.T) { dir := "test-data" ansCfgFile := filepath.Join(dir, "ansible.cfg") invData, err := os.Open(filepath.Join(dir, "inventory.ini")) require.NoError(t, err) _, clsMeta, inv, err := parseInventoryFile(invData) require.NoError(t, err) err = parseGroupVars(context.WithValue( context.TODO(), logprinter.ContextKeyLogger, logprinter.NewLogger(""), ), dir, ansCfgFile, clsMeta, inv) require.NoError(t, err) err = defaults.Set(clsMeta) require.NoError(t, err) var expected spec.ClusterMeta var metaFull spec.ClusterMeta expectedTopo, err := os.ReadFile(filepath.Join(dir, "meta.yaml")) require.NoError(t, err) err = yaml.Unmarshal(expectedTopo, &expected) require.NoError(t, err) // marshal and unmarshal the meta to ensure custom defaults are populated meta, err := yaml.Marshal(clsMeta) require.NoError(t, err) err = yaml.Unmarshal(meta, &metaFull) require.NoError(t, err) sortClusterMeta(&metaFull) sortClusterMeta(&expected) _, err = yaml.Marshal(metaFull) require.NoError(t, err) require.Equal(t, expected, metaFull) } func sortClusterMeta(clsMeta *spec.ClusterMeta) { sort.Slice(clsMeta.Topology.TiDBServers, func(i, j int) bool { return clsMeta.Topology.TiDBServers[i].Host < clsMeta.Topology.TiDBServers[j].Host }) sort.Slice(clsMeta.Topology.TiKVServers, func(i, j int) bool { return 
clsMeta.Topology.TiKVServers[i].Host < clsMeta.Topology.TiKVServers[j].Host }) sort.Slice(clsMeta.Topology.TiFlashServers, func(i, j int) bool { return clsMeta.Topology.TiFlashServers[i].Host < clsMeta.Topology.TiFlashServers[j].Host }) sort.Slice(clsMeta.Topology.PDServers, func(i, j int) bool { return clsMeta.Topology.PDServers[i].Host < clsMeta.Topology.PDServers[j].Host }) sort.Slice(clsMeta.Topology.PumpServers, func(i, j int) bool { return clsMeta.Topology.PumpServers[i].Host < clsMeta.Topology.PumpServers[j].Host }) sort.Slice(clsMeta.Topology.Drainers, func(i, j int) bool { return clsMeta.Topology.Drainers[i].Host < clsMeta.Topology.Drainers[j].Host }) sort.Slice(clsMeta.Topology.CDCServers, func(i, j int) bool { return clsMeta.Topology.CDCServers[i].Host < clsMeta.Topology.CDCServers[j].Host }) sort.Slice(clsMeta.Topology.Monitors, func(i, j int) bool { return clsMeta.Topology.Monitors[i].Host < clsMeta.Topology.Monitors[j].Host }) sort.Slice(clsMeta.Topology.Grafanas, func(i, j int) bool { return clsMeta.Topology.Grafanas[i].Host < clsMeta.Topology.Grafanas[j].Host }) sort.Slice(clsMeta.Topology.Alertmanagers, func(i, j int) bool { return clsMeta.Topology.Alertmanagers[i].Host < clsMeta.Topology.Alertmanagers[j].Host }) } func withTempFile(content string, fn func(string)) { file, err := os.CreateTemp("/tmp", "topology-test") if err != nil { panic(fmt.Sprintf("create temp file: %s", err)) } defer os.Remove(file.Name()) _, err = file.WriteString(content) if err != nil { panic(fmt.Sprintf("write temp file: %s", err)) } file.Close() fn(file.Name()) } func TestParseConfig(t *testing.T) { // base test withTempFile(` a = true [b] c = 1 d = "\"" `, func(file string) { m, err := parseConfigFile(file) require.NoError(t, err) require.Nil(t, m["x"]) require.Equal(t, true, m["a"]) require.Equal(t, int64(1), m["b.c"]) require.Equal(t, "\"", m["b.d"]) }) } func TestDiffConfig(t *testing.T) { global, locals := diffConfigs([]map[string]any{ { "a": true, "b": 1, "foo.bar": 
1, }, { "a": true, "b": 2, "foo.bar": 1, }, { "a": true, "b": 3, "foo.bar": 1, }, }) require.NotNil(t, global["a"]) require.Nil(t, global["b"]) require.Equal(t, true, global["a"]) require.Equal(t, 1, global["foo.bar"]) require.Equal(t, 1, locals[0]["b"]) require.Equal(t, 2, locals[1]["b"]) require.Equal(t, 3, locals[2]["b"]) } tiup-1.16.3/pkg/cluster/ansible/inventory.go000066400000000000000000000477411505422223000210360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ansible import ( "context" "os" "path/filepath" "strconv" "strings" "github.com/creasty/defaults" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/tidbver" "github.com/relex/aini" "gopkg.in/ini.v1" "gopkg.in/yaml.v3" ) var ( // AnsibleInventoryFile is the default inventory file name AnsibleInventoryFile = "inventory.ini" // AnsibleConfigFile is the default ansible config file name AnsibleConfigFile = "ansible.cfg" groupVarsGlobal = "group_vars/all.yml" groupVarsTiDB = "group_vars/tidb_servers.yml" groupVarsTiKV = "group_vars/tikv_servers.yml" groupVarsPD = "group_vars/pd_servers.yml" groupVarsTiFlash = "group_vars/tiflash_servers.yml" // groupVarsPump = "group_vars/pump_servers.yml" // groupVarsDrainer = "group_vars/drainer_servers.yml" groupVarsAlertManager = "group_vars/alertmanager_servers.yml" groupVarsGrafana = "group_vars/grafana_servers.yml" // groupVarsMonitorAgent = 
"group_vars/monitored_servers.yml" groupVarsPrometheus = "group_vars/monitoring_servers.yml" // groupVarsLightning = "group_vars/lightning_server.yml" // groupVarsImporter = "group_vars/importer_server.yml" ) // ParseAndImportInventory builds a basic ClusterMeta from the main Ansible inventory func ParseAndImportInventory(ctx context.Context, dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini.InventoryData, sshTimeout uint64, sshType executor.SSHType) error { if err := parseGroupVars(ctx, dir, ansCfgFile, clsMeta, inv); err != nil { return err } for i := 0; i < len(clsMeta.Topology.TiDBServers); i++ { s := clsMeta.Topology.TiDBServers[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.TiDBServers[i] = ins.(*spec.TiDBSpec) } for i := 0; i < len(clsMeta.Topology.TiKVServers); i++ { s := clsMeta.Topology.TiKVServers[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.TiKVServers[i] = ins.(*spec.TiKVSpec) } for i := 0; i < len(clsMeta.Topology.PDServers); i++ { s := clsMeta.Topology.PDServers[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.PDServers[i] = ins.(*spec.PDSpec) } for i := 0; i < len(clsMeta.Topology.TiFlashServers); i++ { s := clsMeta.Topology.TiFlashServers[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.TiFlashServers[i] = ins.(*spec.TiFlashSpec) } for i := 0; i < len(clsMeta.Topology.PumpServers); i++ { s := clsMeta.Topology.PumpServers[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.PumpServers[i] = ins.(*spec.PumpSpec) } for i := 0; i < len(clsMeta.Topology.Drainers); i++ { s := clsMeta.Topology.Drainers[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } 
clsMeta.Topology.Drainers[i] = ins.(*spec.DrainerSpec) } for i := 0; i < len(clsMeta.Topology.Monitors); i++ { s := clsMeta.Topology.Monitors[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.Monitors[i] = ins.(*spec.PrometheusSpec) } for i := 0; i < len(clsMeta.Topology.Alertmanagers); i++ { s := clsMeta.Topology.Alertmanagers[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.Alertmanagers[i] = ins.(*spec.AlertmanagerSpec) } for i := 0; i < len(clsMeta.Topology.Grafanas); i++ { s := clsMeta.Topology.Grafanas[i] ins, err := parseDirs(ctx, clsMeta.User, s, sshTimeout, sshType) if err != nil { return err } clsMeta.Topology.Grafanas[i] = ins.(*spec.GrafanaSpec) } // TODO: get values from templates of roles to overwrite defaults return defaults.Set(clsMeta) } //revive:disable func parseGroupVars(ctx context.Context, dir, ansCfgFile string, clsMeta *spec.ClusterMeta, inv *aini.InventoryData) error { logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) // set global vars in group_vars/all.yml grpVarsAll, err := readGroupVars(dir, groupVarsGlobal) if err != nil { return err } if port, ok := grpVarsAll["blackbox_exporter_port"]; ok { clsMeta.Topology.MonitoredOptions.BlackboxExporterPort, _ = strconv.Atoi(port) } if port, ok := grpVarsAll["node_exporter_port"]; ok { clsMeta.Topology.MonitoredOptions.NodeExporterPort, _ = strconv.Atoi(port) } // read ansible config ansCfg, err := readAnsibleCfg(ansCfgFile) if err != nil { return err } // try to set value in global ansible config to global options // NOTE: we read this value again when setting port for each host, because the // default host read from aini might be different from the one in ansible config if ansCfg != nil { rPort, err := ansCfg.Section("defaults").Key("remote_port").Int() if err == nil { clsMeta.Topology.GlobalOptions.SSHPort = rPort } } // set hosts // tidb_servers 
if grp, ok := inv.Groups["tidb_servers"]; ok && len(grp.Hosts) > 0 { grpVars, err := readGroupVars(dir, groupVarsTiDB) if err != nil { return err } for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.TiDBSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } if port, ok := grpVars["tidb_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } if statusPort, ok := grpVars["tidb_status_port"]; ok { tmpIns.StatusPort, _ = strconv.Atoi(statusPort) } // apply values from the host if port, ok := srv.Vars["tidb_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } if statusPort, ok := srv.Vars["tidb_status_port"]; ok { tmpIns.StatusPort, _ = strconv.Atoi(statusPort) } if logDir, ok := srv.Vars["tidb_log_dir"]; ok { tmpIns.LogDir = strings.Trim(logDir, "\"") } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.TiDBServers = append(clsMeta.Topology.TiDBServers, tmpIns) } logger.Infof("Imported %d TiDB node(s).", len(clsMeta.Topology.TiDBServers)) } // tikv_servers if grp, ok := inv.Groups["tikv_servers"]; ok && len(grp.Hosts) > 0 { grpVars, err := readGroupVars(dir, groupVarsTiKV) if err != nil { return err } for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.TiKVSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } if port, ok := grpVars["tikv_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } if statusPort, ok := grpVars["tikv_status_port"]; ok { tmpIns.StatusPort, _ = strconv.Atoi(statusPort) } // apply values from the host if port, ok := srv.Vars["tikv_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } if statusPort, ok := srv.Vars["tikv_status_port"]; ok { tmpIns.StatusPort, _ = strconv.Atoi(statusPort) } if dataDir, ok := srv.Vars["tikv_data_dir"]; ok { tmpIns.DataDir = strings.Trim(dataDir, "\"") } if 
logDir, ok := srv.Vars["tikv_log_dir"]; ok { tmpIns.LogDir = strings.Trim(logDir, "\"") } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.TiKVServers = append(clsMeta.Topology.TiKVServers, tmpIns) } logger.Infof("Imported %d TiKV node(s).", len(clsMeta.Topology.TiKVServers)) } // pd_servers if grp, ok := inv.Groups["pd_servers"]; ok && len(grp.Hosts) > 0 { grpVars, err := readGroupVars(dir, groupVarsPD) if err != nil { return err } for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.PDSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } if tmpIns.Host != srv.Name { tmpIns.Name = srv.Name // use alias as the name of PD } if clientPort, ok := grpVars["pd_client_port"]; ok { tmpIns.ClientPort, _ = strconv.Atoi(clientPort) } if peerPort, ok := grpVars["pd_peer_port"]; ok { tmpIns.PeerPort, _ = strconv.Atoi(peerPort) } // apply values from the host if clientPort, ok := srv.Vars["pd_client_port"]; ok { tmpIns.ClientPort, _ = strconv.Atoi(clientPort) } if peerPort, ok := srv.Vars["pd_peer_port"]; ok { tmpIns.PeerPort, _ = strconv.Atoi(peerPort) } if dataDir, ok := srv.Vars["pd_data_dir"]; ok { tmpIns.DataDir = strings.Trim(dataDir, "\"") } if logDir, ok := srv.Vars["pd_log_dir"]; ok { tmpIns.LogDir = strings.Trim(logDir, "\"") } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.PDServers = append(clsMeta.Topology.PDServers, tmpIns) } logger.Infof("Imported %d PD node(s).", len(clsMeta.Topology.PDServers)) } // tiflash_servers if grp, ok := inv.Groups["tiflash_servers"]; ok && len(grp.Hosts) > 0 { grpVars, err := readGroupVars(dir, groupVarsTiFlash) if err != nil { return err } for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.TiFlashSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), 
Imported: true, Arch: "amd64", OS: "linux", } if tcpPort, ok := grpVars["tcp_port"]; ok { tmpIns.TCPPort, _ = strconv.Atoi(tcpPort) } if !tidbver.TiFlashNotNeedHTTPPortConfig(clsMeta.Version) { if httpPort, ok := grpVars["http_port"]; ok { tmpIns.HTTPPort, _ = strconv.Atoi(httpPort) } } if flashServicePort, ok := grpVars["flash_service_port"]; ok { tmpIns.FlashServicePort, _ = strconv.Atoi(flashServicePort) } if flashProxyPort, ok := grpVars["flash_proxy_port"]; ok { tmpIns.FlashProxyPort, _ = strconv.Atoi(flashProxyPort) } if flashProxyStatusPort, ok := grpVars["flash_proxy_status_port"]; ok { tmpIns.FlashProxyStatusPort, _ = strconv.Atoi(flashProxyStatusPort) } if statusPort, ok := grpVars["metrics_port"]; ok { tmpIns.StatusPort, _ = strconv.Atoi(statusPort) } // apply values from the host if tcpPort, ok := srv.Vars["tcp_port"]; ok { tmpIns.TCPPort, _ = strconv.Atoi(tcpPort) } if !tidbver.TiFlashNotNeedHTTPPortConfig(clsMeta.Version) { if httpPort, ok := srv.Vars["http_port"]; ok { tmpIns.HTTPPort, _ = strconv.Atoi(httpPort) } } if flashServicePort, ok := srv.Vars["flash_service_port"]; ok { tmpIns.FlashServicePort, _ = strconv.Atoi(flashServicePort) } if flashProxyPort, ok := srv.Vars["flash_proxy_port"]; ok { tmpIns.FlashProxyPort, _ = strconv.Atoi(flashProxyPort) } if flashProxyStatusPort, ok := srv.Vars["flash_proxy_status_port"]; ok { tmpIns.FlashProxyStatusPort, _ = strconv.Atoi(flashProxyStatusPort) } if statusPort, ok := srv.Vars["metrics_port"]; ok { tmpIns.StatusPort, _ = strconv.Atoi(statusPort) } if dataDir, ok := srv.Vars["data_dir"]; ok { tmpIns.DataDir = strings.Trim(dataDir, "\"") } if logDir, ok := srv.Vars["tiflash_log_dir"]; ok { tmpIns.LogDir = strings.Trim(logDir, "\"") } if tmpDir, ok := srv.Vars["tmp_path"]; ok { tmpIns.TmpDir = tmpDir } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.TiFlashServers = append(clsMeta.Topology.TiFlashServers, tmpIns) } logger.Infof("Imported %d 
TiFlash node(s).", len(clsMeta.Topology.TiFlashServers)) } // spark_master // spark_slaves // lightning_server // importer_server // monitoring_servers if grp, ok := inv.Groups["monitoring_servers"]; ok && len(grp.Hosts) > 0 { grpVars, err := readGroupVars(dir, groupVarsPrometheus) if err != nil { return err } for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.PrometheusSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } if port, ok := grpVars["prometheus_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } // pushgateway no longer needed, just ignore // NOTE: storage retention is not used at present, only for record if _retention, ok := grpVars["prometheus_storage_retention"]; ok { tmpIns.Retention = _retention } // apply values from the host if port, ok := srv.Vars["prometheus_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } // NOTE: storage retention is not used at present, only for record if _retention, ok := srv.Vars["prometheus_storage_retention"]; ok { tmpIns.Retention = _retention } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.Monitors = append(clsMeta.Topology.Monitors, tmpIns) } logger.Infof("Imported %d monitoring node(s).", len(clsMeta.Topology.Monitors)) } // monitored_servers // ^- ignore, we use auto generated full list // alertmanager_servers if grp, ok := inv.Groups["alertmanager_servers"]; ok && len(grp.Hosts) > 0 { grpVars, err := readGroupVars(dir, groupVarsAlertManager) if err != nil { return err } for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.AlertmanagerSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } if port, ok := grpVars["alertmanager_port"]; ok { tmpIns.WebPort, _ = strconv.Atoi(port) } if clusterPort, ok := grpVars["alertmanager_cluster_port"]; ok { 
tmpIns.ClusterPort, _ = strconv.Atoi(clusterPort) } // apply values from the host if port, ok := srv.Vars["alertmanager_port"]; ok { tmpIns.WebPort, _ = strconv.Atoi(port) } if clusterPort, ok := srv.Vars["alertmanager_cluster_port"]; ok { tmpIns.ClusterPort, _ = strconv.Atoi(clusterPort) } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.Alertmanagers = append(clsMeta.Topology.Alertmanagers, tmpIns) } logger.Infof("Imported %d Alertmanager node(s).", len(clsMeta.Topology.Alertmanagers)) } // grafana_servers if grp, ok := inv.Groups["grafana_servers"]; ok && len(grp.Hosts) > 0 { grpVars, err := readGroupVars(dir, groupVarsGrafana) if err != nil { return err } for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.GrafanaSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } if port, ok := grpVars["grafana_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } // apply values from the host if port, ok := srv.Vars["grafana_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } if username, ok := srv.Vars["grafana_admin_user"]; ok { tmpIns.Username = strings.Trim(username, "\"") } if passwd, ok := srv.Vars["grafana_admin_password"]; ok { tmpIns.Password = strings.Trim(passwd, "\"") } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.Grafanas = append(clsMeta.Topology.Grafanas, tmpIns) } logger.Infof("Imported %d Grafana node(s).", len(clsMeta.Topology.Grafanas)) } // pump_servers if grp, ok := inv.Groups["pump_servers"]; ok && len(grp.Hosts) > 0 { /* grpVars, err := readGroupVars(dir, groupVarsPump) if err != nil { return err } */ for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.PumpSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } // nothing in 
pump_servers.yml if port, ok := grpVarsAll["pump_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } // apply values from the host if port, ok := srv.Vars["pump_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } if dataDir, ok := srv.Vars["pump_data_dir"]; ok { tmpIns.DataDir = strings.Trim(dataDir, "\"") } if logDir, ok := srv.Vars["pump_log_dir"]; ok { tmpIns.LogDir = strings.Trim(logDir, "\"") } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.PumpServers = append(clsMeta.Topology.PumpServers, tmpIns) } logger.Infof("Imported %d Pump node(s).", len(clsMeta.Topology.PumpServers)) } // drainer_servers if grp, ok := inv.Groups["drainer_servers"]; ok && len(grp.Hosts) > 0 { /* grpVars, err := readGroupVars(dir, groupVarsDrainer) if err != nil { return err } */ for _, srv := range grp.Hosts { host := srv.Vars["ansible_host"] if host == "" { host = srv.Name } tmpIns := &spec.DrainerSpec{ Host: host, SSHPort: GetHostPort(srv, ansCfg), Imported: true, Arch: "amd64", OS: "linux", } // nothing in drainer_servers.yml if port, ok := grpVarsAll["drainer_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } // apply values from the host if port, ok := srv.Vars["drainer_port"]; ok { tmpIns.Port, _ = strconv.Atoi(port) } logger.Debugf("Imported %s node %s:%d.", tmpIns.Role(), tmpIns.Host, tmpIns.GetMainPort()) clsMeta.Topology.Drainers = append(clsMeta.Topology.Drainers, tmpIns) } logger.Infof("Imported %d Drainer node(s).", len(clsMeta.Topology.Drainers)) } // TODO: node_exporter and blackbox_exporter on custom port is not supported yet // if it is set on a host line. Global values in group_vars/all.yml will be // correctly parsed. 
return nil } // readGroupVars sets values from configs in group_vars/ dir func readGroupVars(dir, filename string) (map[string]string, error) { result := make(map[string]string) fileData, err := os.ReadFile(filepath.Join(dir, filename)) if err != nil { return nil, err } if err = yaml.Unmarshal(fileData, &result); err != nil { return nil, err } return result, nil } // GetHostPort tries to read the SSH port of the host // 1. get from Host.Vars["ansible_port"] // 2. get from cfg.Section("defaults").Key("remote_port") // 3. get from srv.Port func GetHostPort(srv *aini.Host, cfg *ini.File) int { // parse per host config // aini parse the port inline with hostnames (e.g., something like `host:22`) // but not handling the "ansible_port" variable if port, ok := srv.Vars["ansible_port"]; ok { intPort, err := strconv.Atoi(port) if err == nil { return intPort } } // try to get value from global ansible config if cfg != nil { rPort, err := cfg.Section("defaults").Key("remote_port").Int() if err == nil { return rPort } } return srv.Port } // readAnsibleCfg tries to read global configs of ansible func readAnsibleCfg(cfgFile string) (*ini.File, error) { var cfgData []byte // raw config ini data var err error // try to read from env if the file does not exist if _, err = os.Stat(cfgFile); cfgFile == "" || err != nil { if !os.IsNotExist(err) { return nil, err } data := os.Getenv("ANSIBLE_CONFIG") if data == "" { return nil, nil } cfgData = []byte(data) } else { cfgData, err = os.ReadFile(cfgFile) if err != nil { return nil, err } } return ini.Load(cfgData) } tiup-1.16.3/pkg/cluster/ansible/service.go000066400000000000000000000231551505422223000204320ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ansible import ( "context" "fmt" "path/filepath" "strconv" "strings" "time" "github.com/BurntSushi/toml" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" ) var ( systemdUnitPath = "/etc/systemd/system" ) // parseDirs sets values of directories of component // //revive:disable func parseDirs(ctx context.Context, user string, ins spec.InstanceSpec, sshTimeout uint64, sshType executor.SSHType) (spec.InstanceSpec, error) { logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) hostName, sshPort := ins.SSH() e, err := executor.New(sshType, false, executor.SSHConfig{ Host: hostName, Port: sshPort, User: user, KeyFile: SSHKeyPath(), // ansible generated keyfile Timeout: time.Second * time.Duration(sshTimeout), }) if err != nil { return nil, err } logger.Debugf("Detecting deploy paths on %s...", hostName) stdout, err := readStartScript(ctx, e, ins.Role(), hostName, ins.GetMainPort()) if len(stdout) <= 1 || err != nil { return ins, err } switch ins.Role() { case spec.ComponentTiDB: // parse dirs newIns := ins.(*spec.TiDBSpec) for line := range strings.SplitSeq(stdout, "\n") { if after, ok := strings.CutPrefix(line, "DEPLOY_DIR="); ok { newIns.DeployDir = after continue } if strings.Contains(line, "--log-file=") { fullLog := strings.Split(line, " ")[4] // 4 whitespaces ahead logDir := strings.TrimSuffix(strings.TrimPrefix(fullLog, "--log-file=\""), "/tidb.log\"") newIns.LogDir = logDir continue } } return newIns, nil case spec.ComponentTiKV: // parse 
dirs newIns := ins.(*spec.TiKVSpec) for line := range strings.SplitSeq(stdout, "\n") { if strings.HasPrefix(line, "cd \"") { newIns.DeployDir = strings.Trim(strings.Split(line, " ")[1], "\"") continue } if strings.Contains(line, "--data-dir") { dataDir := strings.Split(line, " ")[5] // 4 whitespaces ahead newIns.DataDir = strings.Trim(dataDir, "\"") continue } if strings.Contains(line, "--log-file") { fullLog := strings.Split(line, " ")[5] // 4 whitespaces ahead logDir := strings.TrimSuffix(strings.TrimPrefix(fullLog, "\""), "/tikv.log\"") newIns.LogDir = logDir continue } } return newIns, nil case spec.ComponentPD: // parse dirs newIns := ins.(*spec.PDSpec) for line := range strings.SplitSeq(stdout, "\n") { if after, ok := strings.CutPrefix(line, "DEPLOY_DIR="); ok { newIns.DeployDir = after continue } if strings.Contains(line, "--name") { nameArg := strings.Split(line, " ")[4] // 4 whitespaces ahead name := strings.TrimPrefix(nameArg, "--name=") newIns.Name = strings.Trim(name, "\"") continue } if strings.Contains(line, "--data-dir") { dataArg := strings.Split(line, " ")[4] // 4 whitespaces ahead dataDir := strings.TrimPrefix(dataArg, "--data-dir=") newIns.DataDir = strings.Trim(dataDir, "\"") continue } if strings.Contains(line, "--log-file=") { fullLog := strings.Split(line, " ")[4] // 4 whitespaces ahead logDir := strings.TrimSuffix(strings.TrimPrefix(fullLog, "--log-file=\""), "/pd.log\"") newIns.LogDir = logDir continue } } return newIns, nil case spec.ComponentTiFlash: // parse dirs newIns := ins.(*spec.TiFlashSpec) for line := range strings.SplitSeq(stdout, "\n") { if strings.HasPrefix(line, "cd \"") { newIns.DeployDir = strings.Trim(strings.Split(line, " ")[1], "\"") continue } // exec bin/tiflash/tiflash server --config-file conf/tiflash.toml if strings.Contains(line, "-config-file") { // parser the config file for `path` and `tmp_path` part := strings.Split(line, " ") fname := part[len(part)-1] fname = strings.TrimSpace(fname) if !filepath.IsAbs(fname) 
{ fname = filepath.Join(newIns.DeployDir, fname) } err := parseTiflashConfig(ctx, e, newIns, fname) if err != nil { return nil, err } } } return newIns, nil case spec.ComponentPump: // parse dirs newIns := ins.(*spec.PumpSpec) for line := range strings.SplitSeq(stdout, "\n") { if after, ok := strings.CutPrefix(line, "DEPLOY_DIR="); ok { newIns.DeployDir = after continue } if strings.Contains(line, "--data-dir") { dataArg := strings.Split(line, " ")[4] // 4 whitespaces ahead dataDir := strings.TrimPrefix(dataArg, "--data-dir=") newIns.DataDir = strings.Trim(dataDir, "\"") continue } if strings.Contains(line, "--log-file=") { fullLog := strings.Split(line, " ")[4] // 4 whitespaces ahead logDir := strings.TrimSuffix(strings.TrimPrefix(fullLog, "--log-file=\""), "/pump.log\"") newIns.LogDir = logDir continue } } return newIns, nil case spec.ComponentDrainer: // parse dirs newIns := ins.(*spec.DrainerSpec) for line := range strings.SplitSeq(stdout, "\n") { if after, ok := strings.CutPrefix(line, "DEPLOY_DIR="); ok { newIns.DeployDir = after continue } if strings.Contains(line, "--data-dir") { dataArg := strings.Split(line, " ")[4] // 4 whitespaces ahead dataDir := strings.TrimPrefix(dataArg, "--data-dir=") newIns.DataDir = strings.Trim(dataDir, "\"") continue } if strings.Contains(line, "--log-file=") { fullLog := strings.Split(line, " ")[4] // 4 whitespaces ahead logDir := strings.TrimSuffix(strings.TrimPrefix(fullLog, "--log-file=\""), "/drainer.log\"") newIns.LogDir = logDir continue } if strings.Contains(line, "--initial-commit-ts=") { tsArg := strings.Split(line, " ")[4] // 4 whitespaces ahead tmpTs, _ := strconv.Atoi(strings.TrimPrefix(tsArg, "--initial-commit-ts=")) newIns.Config = make(map[string]any) newIns.Config["initial-commit-ts"] = int64(tmpTs) } } return newIns, nil case spec.ComponentPrometheus: // parse dirs newIns := ins.(*spec.PrometheusSpec) for line := range strings.SplitSeq(stdout, "\n") { if after, ok := strings.CutPrefix(line, "DEPLOY_DIR="); ok 
{ newIns.DeployDir = after continue } if strings.Contains(line, "exec > >(tee -i -a") { fullLog := strings.Split(line, " ")[5] logDir := strings.TrimSuffix(strings.TrimPrefix(fullLog, "\""), "/prometheus.log\")") newIns.LogDir = logDir continue } if strings.Contains(line, "--storage.tsdb.path=") { dataArg := strings.Split(line, " ")[4] // 4 whitespaces ahead dataDir := strings.TrimPrefix(dataArg, "--storage.tsdb.path=") newIns.DataDir = strings.Trim(dataDir, "\"") continue } } return newIns, nil case spec.ComponentAlertmanager: // parse dirs newIns := ins.(*spec.AlertmanagerSpec) for line := range strings.SplitSeq(stdout, "\n") { if after, ok := strings.CutPrefix(line, "DEPLOY_DIR="); ok { newIns.DeployDir = after continue } if strings.Contains(line, "exec > >(tee -i -a") { fullLog := strings.Split(line, " ")[5] logDir := strings.TrimSuffix(strings.TrimPrefix(fullLog, "\""), "/alertmanager.log\")") newIns.LogDir = logDir continue } if strings.Contains(line, "--storage.path=") { dataArg := strings.Split(line, " ")[4] // 4 whitespaces ahead dataDir := strings.TrimPrefix(dataArg, "--storage.path=") newIns.DataDir = strings.Trim(dataDir, "\"") continue } } return newIns, nil case spec.ComponentGrafana: // parse dirs newIns := ins.(*spec.GrafanaSpec) for line := range strings.SplitSeq(stdout, "\n") { if after, ok := strings.CutPrefix(line, "DEPLOY_DIR="); ok { newIns.DeployDir = after continue } } return newIns, nil } return ins, nil } func parseTiflashConfig(ctx context.Context, e ctxt.Executor, spec *spec.TiFlashSpec, fname string) error { data, err := readFile(ctx, e, fname) if err != nil { return err } err = parseTiflashConfigFromFileData(spec, data) if err != nil { return err } return nil } func parseTiflashConfigFromFileData(spec *spec.TiFlashSpec, data []byte) error { cfg := make(map[string]any) err := toml.Unmarshal(data, &cfg) if err != nil { return errors.AddStack(err) } if path, ok := cfg["path"]; ok { spec.DataDir = fmt.Sprintf("%v", path) } if tmpPath, ok 
:= cfg["tmp_path"]; ok { spec.TmpDir = fmt.Sprintf("%v", tmpPath) } return nil } func readFile(ctx context.Context, e ctxt.Executor, fname string) (data []byte, err error) { cmd := fmt.Sprintf("cat %s", fname) stdout, stderr, err := e.Execute(ctx, cmd, false) if err != nil { return nil, errors.Annotatef(err, "stderr: %s", stderr) } return stdout, nil } func readStartScript(ctx context.Context, e ctxt.Executor, component, host string, port int) (string, error) { serviceFile := fmt.Sprintf("%s/%s-%d.service", systemdUnitPath, component, port) cmd := fmt.Sprintf("cat `grep 'ExecStart' %s | sed 's/ExecStart=//'`", serviceFile) stdout, stderr, err := e.Execute(ctx, cmd, false) if err != nil { return string(stdout), err } if len(stderr) > 0 { return string(stdout), errors.Errorf( "can not detect dir paths of %s %s:%d, %s", component, host, port, stderr, ) } return string(stdout), nil } tiup-1.16.3/pkg/cluster/ansible/service_test.go000066400000000000000000000033501505422223000214640ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package ansible import ( "testing" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/stretchr/testify/require" ) var tiflashConfig = ` default_profile = "default" display_name = "TiFlash" listen_host = "0.0.0.0" path = "/data1/test-cluster/leiysky-ansible-test-deploy/tiflash/data/db" tmp_path = "/data1/test-cluster/leiysky-ansible-test-deploy/tiflash/data/db/tmp" [flash] service_addr = "172.16.5.85:11317" tidb_status_addr = "172.16.5.85:11310" [flash.flash_cluster] cluster_manager_path = "/data1/test-cluster/leiysky-ansible-test-deploy/bin/tiflash/flash_cluster_manager" log = "/data1/test-cluster/leiysky-ansible-test-deploy/log/tiflash_cluster_manager.log" master_ttl = 60 refresh_interval = 20 update_rule_interval = 5 [flash.proxy] config = "/data1/test-cluster/leiysky-ansible-test-deploy/conf/tiflash-learner.toml" ` func TestParseTiflashConfigFromFileData(t *testing.T) { specObj := new(spec.TiFlashSpec) data := []byte(tiflashConfig) err := parseTiflashConfigFromFileData(specObj, data) require.NoError(t, err) require.Equal(t, "/data1/test-cluster/leiysky-ansible-test-deploy/tiflash/data/db", specObj.DataDir) require.Equal(t, "/data1/test-cluster/leiysky-ansible-test-deploy/tiflash/data/db/tmp", specObj.TmpDir) } tiup-1.16.3/pkg/cluster/ansible/test-data/000077500000000000000000000000001505422223000203235ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/ansible/test-data/ansible.cfg000066400000000000000000000006231505422223000224220ustar00rootroot00000000000000[defaults] ## Customize this! 
inventory = inventory.ini transport = ssh # disable SSH key host checking host_key_checking = False# gathering = smart gathering = explicitfact_caching = jsonfile fact_caching_connection = fact_files retry_files_save_path = retry_files#remote_tmp = /tmp/ansible# for slow connections timeout = 10 gather_subset = network,hardware# if ssh port is not 22 remote_port = 9999 tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/000077500000000000000000000000001505422223000225125ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/alertmanager_servers.yml000066400000000000000000000000741505422223000274510ustar00rootroot00000000000000--- alertmanager_port: 9093 alertmanager_cluster_port: 9094 tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/all.yml000066400000000000000000000023701505422223000240070ustar00rootroot00000000000000--- # Variables here are applicable to all host groups deploy_user: "{{ ansible_user }}" status_dir: "{{ deploy_dir }}/status" backup_dir: "{{ deploy_dir }}/backup" images_dir: "{{ deploy_dir }}/images" # Local downloads_dir: "{{ playbook_dir }}/downloads" resources_dir: "{{ playbook_dir }}/resources" fetch_tmp_dir: "{{ playbook_dir }}/collect_diagnosis_data" fetch_dir: "{{ playbook_dir }}/collect_diagnosis" cert_dir: "{{ playbook_dir }}/conf/ssl" script_dir: "{{ playbook_dir }}/scripts" binary_dir: "{{ playbook_dir }}/resources/bin" # default configuration for multiple host groups and roles node_exporter_port: 9101 blackbox_exporter_port: 9115 kafka_exporter_port: 9308 # docker docker_bin_dir: "/usr/bin" # Random shifts for retrying failed ops like downloading retry_stagger: 5 # deployment methods, [binary, docker] docker deployment method is not recommended and deprecated. deployment_method: binary enable_log_clean: False log_retain_days: 28 dev_mode: True # systemd: Specifies whether to send SIGKILL to remaining processes after a timeout. 
disable_send_sigkill: False # pump pump_port: 8250 pump_data_dir: "{{ deploy_dir }}/data.pump" pump_log_dir: "{{ deploy_dir }}/log" pump_cert_dir: "{{ deploy_dir }}/conf/ssl" # drainer drainer_port: 8249 tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/drainer_servers.yml000066400000000000000000000000041505422223000264240ustar00rootroot00000000000000--- tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/grafana_servers.yml000066400000000000000000000001151505422223000264020ustar00rootroot00000000000000--- grafana_port: 3000 grafana_api_keys_dir: "{{ playbook_dir }}/conf/keys" tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/importer_server.yml000066400000000000000000000004421505422223000264640ustar00rootroot00000000000000--- dummy: # this directory is used to store the data written by `tidb-lightning` import_dir: "{{ deploy_dir }}/data.import" # the listening address of tikv-importer. tidb-lightning needs to connect to this address to write data. Set it to the actual IP address. 
tikv_importer_port: 8287 tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/lightning_server.yml000066400000000000000000000004571505422223000266140ustar00rootroot00000000000000--- dummy: # background profile for debugging tidb_lightning_pprof_port: 8289 # the source data directory of Mydumper data_source_dir: "{{ deploy_dir }}/mydumper" # Tidb cluster information to import data # tidb_host: "" # tidb_port: 4000 # tidb_user: "" # tidb_password: "" # tidb_status_port: 10080 tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/monitored_servers.yml000066400000000000000000000000621505422223000270040ustar00rootroot00000000000000--- node_exporter_log_dir: "{{ deploy_dir }}/log" tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/monitoring_servers.yml000066400000000000000000000002021505422223000271650ustar00rootroot00000000000000--- prometheus_port: 9090 pushgateway_port: 9091 # How long to retain samples in the storage prometheus_storage_retention: "30d" tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/pd_servers.yml000066400000000000000000000002521505422223000254100ustar00rootroot00000000000000--- dummy: pd_client_port: 2379 pd_peer_port: 2380 pd_data_dir: "{{ deploy_dir }}/data.pd" pd_log_dir: "{{ deploy_dir }}/log" pd_cert_dir: "{{ deploy_dir }}/conf/ssl" tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/pump_servers.yml000066400000000000000000000000041505422223000257610ustar00rootroot00000000000000--- tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/tidb_servers.yml000066400000000000000000000002051505422223000257250ustar00rootroot00000000000000--- dummy: tidb_port: 4000 tidb_status_port: 10080 tidb_log_dir: "{{ deploy_dir }}/log" tidb_cert_dir: "{{ deploy_dir }}/conf/ssl" 
tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/tiflash_servers.yml000066400000000000000000000000001505422223000264260ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/ansible/test-data/group_vars/tikv_servers.yml000066400000000000000000000002621505422223000257630ustar00rootroot00000000000000--- dummy: tikv_port: 20160 tikv_status_port: 20180 tikv_data_dir: "{{ deploy_dir }}/tikv_data" tikv_log_dir: "{{ deploy_dir }}/log" tikv_cert_dir: "{{ deploy_dir }}/conf/ssl" tiup-1.16.3/pkg/cluster/ansible/test-data/inventory.ini000066400000000000000000000042661505422223000230710ustar00rootroot00000000000000## TiDB Cluster Part [tidb_servers] tidb218 ansible_host=172.16.1.218 tidb_status_port=3399 172.16.1.219 tidb_port=3397 [tikv_servers] 172.16.1.219 labels="tiup=depsrv" 172.16.1.220 tikv_port=20166 labels="tiup=depsrv" 172.16.1.221 labels="host=tikv221" [pd_servers] TiDB-PD-218 ansible_host=172.16.1.218 ansible_port=30000 172.16.1.219 ansible_port=2222 172.16.1.220 deploy_dir=/data-path/custom_deploy/pd220 [spark_master] [spark_slaves] [lightning_server] [importer_server] ## Monitoring Part # prometheus and pushgateway servers [monitoring_servers] 172.16.1.221 [grafana_servers] 172.16.1.221 # node_exporter and blackbox_exporter servers [monitored_servers] 172.16.1.218 172.16.1.219 172.16.1.220 172.16.1.221 [alertmanager_servers] 172.16.1.221 [kafka_exporter_servers] ## Binlog Part [pump_servers] 172.16.1.219 172.16.1.220 pump_port=8333 [drainer_servers] 172.16.1.220 drainer_port=8444 172.16.1.221 [tiflash_servers] tiflash1 ansible_host=172.16.1.222 tiflash2 ansible_host=172.16.1.223 ansible_port=30000 ## Group variables [pd_servers:vars] location_labels = ["zone","rack","host","tiup"] ## Global variables [all:vars] deploy_dir = /home/tiopsimport/ansible-deploy ## Connection # ssh via normal user ansible_user = tiops cluster_name = ansible-cluster tidb_version = v3.0.12 # process supervision, [systemd, supervise] process_supervision = systemd timezone = 
Asia/Shanghai enable_firewalld = False # check NTP service enable_ntpd = True set_hostname = True ## binlog trigger enable_binlog = True # kafka cluster address for monitoring, example: # kafka_addrs = "192.168.0.11:9092,192.168.0.12:9092,192.168.0.13:9092" kafka_addrs = "" zookeeper_addrs = "" # enable TLS authentication in the TiDB cluster enable_tls = False # KV mode deploy_without_tidb = False # wait for region replication complete before start tidb-server. wait_replication = True # Optional: Set if you already have a alertmanager server. # Format: alertmanager_host:alertmanager_port alertmanager_target = "" grafana_admin_user = "foo" grafana_admin_password = bar ### Collect diagnosis collect_log_recent_hours = 2 enable_bandwidth_limit = False # default: 10Mb/s, unit: Kbit/s collect_bandwidth_limit = 10000 tiup-1.16.3/pkg/cluster/ansible/test-data/meta.yaml000066400000000000000000000117241505422223000221420ustar00rootroot00000000000000user: tiops tidb_version: v3.0.12 topology: global: user: tiops ssh_port: 9999 deploy_dir: /home/tiopsimport/ansible-deploy data_dir: data os: linux arch: amd64 monitored: node_exporter_port: 9101 blackbox_exporter_port: 9115 deploy_dir: /home/tiopsimport/ansible-deploy data_dir: /home/tiopsimport/ansible-deploy/data log_dir: /home/tiopsimport/ansible-deploy/log server_configs: tidb: binlog.enable: true tikv: {} pd: {} tso: {} scheduling: {} tidb_dashboard: {} tiflash: {} tiproxy: {} tiflash-learner: {} pump: {} drainer: {} cdc: {} kvcdc: {} grafana: {} tidb_servers: - host: 172.16.1.218 ssh_port: 9999 imported: true port: 4000 status_port: 3399 deploy_dir: /home/tiopsimport/ansible-deploy/tidb-4000 arch: amd64 os: linux - host: 172.16.1.219 ssh_port: 2222 imported: true port: 3397 status_port: 10080 deploy_dir: /home/tiopsimport/ansible-deploy/tidb-3397 arch: amd64 os: linux tikv_servers: - host: 172.16.1.219 ssh_port: 2222 imported: true port: 20160 status_port: 20180 deploy_dir: /home/tiopsimport/ansible-deploy/tikv-20160 
data_dir: data/tikv-20160 arch: amd64 os: linux - host: 172.16.1.220 ssh_port: 9999 imported: true port: 20166 status_port: 20180 deploy_dir: /home/tiopsimport/ansible-deploy/tikv-20166 data_dir: data/tikv-20166 arch: amd64 os: linux - host: 172.16.1.221 ssh_port: 9999 imported: true port: 20160 status_port: 20180 deploy_dir: /home/tiopsimport/ansible-deploy/tikv-20160 data_dir: data/tikv-20160 arch: amd64 os: linux tiflash_servers: - host: 172.16.1.222 ssh_port: 9999 imported: true tcp_port: 9000 flash_service_port: 3930 flash_proxy_port: 20170 flash_proxy_status_port: 20292 metrics_port: 8234 deploy_dir: /home/tiopsimport/ansible-deploy/tiflash-9000 data_dir: data/tiflash-9000 arch: amd64 os: linux - host: 172.16.1.223 ssh_port: 30000 imported: true tcp_port: 9000 flash_service_port: 3930 flash_proxy_port: 20170 flash_proxy_status_port: 20292 metrics_port: 8234 deploy_dir: /home/tiopsimport/ansible-deploy/tiflash-9000 data_dir: data/tiflash-9000 arch: amd64 os: linux pd_servers: - host: 172.16.1.218 ssh_port: 30000 imported: true name: TiDB-PD-218 client_port: 2379 peer_port: 2380 deploy_dir: /home/tiopsimport/ansible-deploy/pd-2379 data_dir: data/pd-2379 arch: amd64 os: linux - host: 172.16.1.219 ssh_port: 2222 imported: true name: pd-172.16.1.219-2379 client_port: 2379 peer_port: 2380 deploy_dir: /home/tiopsimport/ansible-deploy/pd-2379 data_dir: data/pd-2379 arch: amd64 os: linux - host: 172.16.1.220 ssh_port: 9999 imported: true name: pd-172.16.1.220-2379 client_port: 2379 peer_port: 2380 deploy_dir: /home/tiopsimport/ansible-deploy/pd-2379 data_dir: data/pd-2379 arch: amd64 os: linux pump_servers: - host: 172.16.1.219 ssh_port: 2222 imported: true port: 8250 deploy_dir: /home/tiopsimport/ansible-deploy/pump-8250 data_dir: data/pump-8250 arch: amd64 os: linux - host: 172.16.1.220 ssh_port: 9999 imported: true port: 8333 deploy_dir: /home/tiopsimport/ansible-deploy/pump-8333 data_dir: data/pump-8333 arch: amd64 os: linux drainer_servers: - host: 172.16.1.220 
ssh_port: 9999 imported: true port: 8444 deploy_dir: /home/tiopsimport/ansible-deploy/drainer-8444 data_dir: data/drainer-8444 arch: amd64 os: linux - host: 172.16.1.221 ssh_port: 9999 imported: true port: 8249 deploy_dir: /home/tiopsimport/ansible-deploy/drainer-8249 data_dir: data/drainer-8249 arch: amd64 os: linux monitoring_servers: - host: 172.16.1.221 ssh_port: 9999 imported: true port: 9090 deploy_dir: /home/tiopsimport/ansible-deploy/prometheus-9090 data_dir: data/prometheus-9090 storage_retention: 30d external_alertmanagers: [] arch: amd64 os: linux grafana_servers: - host: 172.16.1.221 ssh_port: 9999 imported: true port: 3000 username: foo password: bar deploy_dir: /home/tiopsimport/ansible-deploy/grafana-3000 arch: amd64 os: linux alertmanager_servers: - host: 172.16.1.221 ssh_port: 9999 imported: true web_port: 9093 cluster_port: 9094 deploy_dir: /home/tiopsimport/ansible-deploy/alertmanager-9093 data_dir: data/alertmanager-9093 arch: amd64 os: linux tiproxy_servers: [] tiup-1.16.3/pkg/cluster/api/000077500000000000000000000000001505422223000155715ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/api/binlog.go000066400000000000000000000174741505422223000174070ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package api import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "net/http" "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/utils" clientv3 "go.etcd.io/etcd/client/v3" ) // BinlogClient is the client of binlog. 
type BinlogClient struct { tls *tls.Config httpClient *http.Client etcdClient *clientv3.Client } // NewBinlogClient create a BinlogClient. func NewBinlogClient(pdEndpoints []string, timeout time.Duration, tlsConfig *tls.Config) (*BinlogClient, error) { if timeout < time.Second { timeout = time.Second * 5 } etcdClient, err := clientv3.New(clientv3.Config{ Endpoints: pdEndpoints, DialTimeout: timeout, TLS: tlsConfig, }) if err != nil { return nil, errors.AddStack(err) } return &BinlogClient{ tls: tlsConfig, httpClient: utils.NewHTTPClient(timeout, tlsConfig).Client(), etcdClient: etcdClient, }, nil } func (c *BinlogClient) getURL(addr string) string { scheme := "http" if c.tls != nil { scheme = "https" } return fmt.Sprintf("%s://%s", scheme, addr) } func (c *BinlogClient) getOfflineURL(addr string, nodeID string) string { return fmt.Sprintf("%s/state/%s/close", c.getURL(addr), nodeID) } // StatusResp represents the response of status api. type StatusResp struct { Code int `json:"code"` Message string `json:"message"` } // NodeStatus represents the status saved in etcd. type NodeStatus struct { NodeID string `json:"nodeId"` Addr string `json:"host"` State string `json:"state"` MaxCommitTS int64 `json:"maxCommitTS"` UpdateTS int64 `json:"updateTS"` } // IsPumpTombstone check if drainer is tombstone. func (c *BinlogClient) IsPumpTombstone(ctx context.Context, addr string) (bool, error) { nodeID, err := c.nodeID(ctx, addr, "pumps") if err != nil { return false, err } return c.isTombstone(ctx, "pumps", nodeID) } // IsDrainerTombstone check if drainer is tombstone. 
func (c *BinlogClient) IsDrainerTombstone(ctx context.Context, addr string) (bool, error) { nodeID, err := c.nodeID(ctx, addr, "drainers") if err != nil { return false, err } return c.isTombstone(ctx, "drainers", nodeID) } func (c *BinlogClient) isTombstone(ctx context.Context, ty string, nodeID string) (bool, error) { s, err := c.nodeStatus(ctx, ty, nodeID) if err != nil { return false, err } if s.State == "offline" { return true, nil } return false, nil } // nolint (unused) func (c *BinlogClient) pumpNodeStatus(ctx context.Context) (status []*NodeStatus, err error) { return c.nodesStatus(ctx, "pumps") } // nolint (unused) func (c *BinlogClient) drainerNodeStatus(ctx context.Context) (status []*NodeStatus, err error) { return c.nodesStatus(ctx, "drainers") } func (c *BinlogClient) nodeID(ctx context.Context, addr, ty string) (string, error) { // the number of nodes with the same ip:port targetNodes := []string{} nodes, err := c.nodesStatus(ctx, ty) if err != nil { return "", err } addrs := []string{} for _, node := range nodes { if addr == node.Addr { targetNodes = append(targetNodes, node.NodeID) continue } addrs = append(addrs, addr) } switch len(targetNodes) { case 0: return "", errors.Errorf("%s node id for address %s not found, found address: %s", ty, addr, addrs) case 1: return targetNodes[0], nil default: return "", errors.Errorf("found multiple %s nodes with the same host, found nodes: %s", ty, strings.Join(targetNodes, ",")) } } // UpdateDrainerState update the specify state as the specified state. func (c *BinlogClient) UpdateDrainerState(ctx context.Context, addr string, state string) error { nodeID, err := c.nodeID(ctx, addr, "drainers") if err != nil { return err } return c.updateStatus(ctx, "drainers", nodeID, state) } // UpdatePumpState update the specify state as the specified state. 
func (c *BinlogClient) UpdatePumpState(ctx context.Context, addr string, state string) error { nodeID, err := c.nodeID(ctx, addr, "pumps") if err != nil { return err } return c.updateStatus(ctx, "pumps", nodeID, state) } // updateStatus update the specify state as the specified state. func (c *BinlogClient) updateStatus(ctx context.Context, ty string, nodeID string, state string) error { ctx, f := context.WithTimeout(ctx, c.httpClient.Timeout) defer f() s, err := c.nodeStatus(ctx, ty, nodeID) if err != nil { return errors.AddStack(err) } if s.State == state { return nil } s.State = state data, err := json.Marshal(&s) if err != nil { return errors.AddStack(err) } key := fmt.Sprintf("/tidb-binlog/v1/%s/%s", ty, nodeID) _, err = c.etcdClient.Put(ctx, key, string(data)) if err != nil { return errors.AddStack(err) } return nil } func (c *BinlogClient) nodesStatus(ctx context.Context, ty string) (status []*NodeStatus, err error) { key := fmt.Sprintf("/tidb-binlog/v1/%s", ty) // set timeout, otherwise it will keep retrying ctx, f := context.WithTimeout(ctx, c.httpClient.Timeout) defer f() resp, err := c.etcdClient.KV.Get(ctx, key, clientv3.WithPrefix()) if err != nil { return nil, errors.AddStack(err) } for _, kv := range resp.Kvs { var s NodeStatus err = json.Unmarshal(kv.Value, &s) if err != nil { return nil, errors.Annotatef(err, "key: %s,data: %s", string(kv.Key), string(kv.Value)) } status = append(status, &s) } return } // nodeStatus get nodeStatus with nodeID func (c *BinlogClient) nodeStatus(ctx context.Context, ty string, nodeID string) (node *NodeStatus, err error) { key := fmt.Sprintf("/tidb-binlog/v1/%s/%s", ty, nodeID) resp, err := c.etcdClient.KV.Get(ctx, key) if err != nil { return nil, errors.AddStack(err) } if len(resp.Kvs) > 0 { err = json.Unmarshal(resp.Kvs[0].Value, &node) if err != nil { return nil, errors.Annotatef(err, "key: %s,data: %s", string(resp.Kvs[0].Key), string(resp.Kvs[0].Value)) } return } return nil, errors.Errorf("%s node-id: %s not 
found, found address: %s", ty, nodeID, key) } func (c *BinlogClient) offline(addr string, nodeID string) error { url := c.getOfflineURL(addr, nodeID) req, err := http.NewRequest("PUT", url, nil) if err != nil { return errors.AddStack(err) } resp, err := c.httpClient.Do(req) if err != nil { return errors.AddStack(err) } if resp.StatusCode < 200 || resp.StatusCode >= 400 { return errors.Errorf("error requesting %s, code: %d", resp.Request.URL, resp.StatusCode) } defer resp.Body.Close() data, err := io.ReadAll(resp.Body) if err != nil { return errors.AddStack(err) } var status StatusResp err = json.Unmarshal(data, &status) if err != nil { return errors.Annotatef(err, "data: %s", string(data)) } if status.Code != 200 { return errors.Errorf("server error: %s", status.Message) } return nil } // OfflinePump offline a pump. func (c *BinlogClient) OfflinePump(ctx context.Context, addr string) error { nodeID, err := c.nodeID(ctx, addr, "pumps") if err != nil { return err } s, err := c.nodeStatus(ctx, "pumps", nodeID) if err != nil { return err } if s.State == "offline" { return nil } return c.offline(addr, nodeID) } // OfflineDrainer offline a drainer. func (c *BinlogClient) OfflineDrainer(ctx context.Context, addr string) error { nodeID, err := c.nodeID(ctx, addr, "drainers") if err != nil { return err } s, err := c.nodeStatus(ctx, "drainers", nodeID) if err != nil { return err } if s.State == "offline" { return nil } return c.offline(addr, nodeID) } tiup-1.16.3/pkg/cluster/api/cdcapi.go000066400000000000000000000256441505422223000173560ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package api import ( "bytes" "context" "crypto/tls" "encoding/json" "fmt" "net/http" "time" "github.com/pingcap/errors" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/utils" ) // CDCOpenAPIClient is client for access TiCDC Open API type CDCOpenAPIClient struct { urls []string client *utils.HTTPClient ctx context.Context } // NewCDCOpenAPIClient return a `CDCOpenAPIClient` func NewCDCOpenAPIClient(ctx context.Context, addresses []string, timeout time.Duration, tlsConfig *tls.Config) *CDCOpenAPIClient { httpPrefix := "http" if tlsConfig != nil { httpPrefix = "https" } urls := make([]string, 0, len(addresses)) for _, addr := range addresses { urls = append(urls, fmt.Sprintf("%s://%s", httpPrefix, addr)) } return &CDCOpenAPIClient{ urls: urls, client: utils.NewHTTPClient(timeout, tlsConfig), ctx: ctx, } } func (c *CDCOpenAPIClient) getEndpoints(api string) (endpoints []string) { for _, url := range c.urls { endpoints = append(endpoints, fmt.Sprintf("%s/%s", url, api)) } return endpoints } func drainCapture(client *CDCOpenAPIClient, target string) (int, error) { api := "api/v1/captures/drain" endpoints := client.getEndpoints(api) request := DrainCaptureRequest{ CaptureID: target, } body, err := json.Marshal(request) if err != nil { return 0, err } var resp DrainCaptureResp _, err = tryURLs(endpoints, func(endpoint string) ([]byte, error) { data, statusCode, err := client.client.Put(client.ctx, endpoint, bytes.NewReader(body)) if err != nil { switch statusCode { case http.StatusNotFound: // old version cdc does not support `DrainCapture`, return nil to trigger hard restart. 
client.l().Debugf("cdc drain capture does not support, ignore it, target: %s, err: %+v", target, err) return data, nil case http.StatusServiceUnavailable: if bytes.Contains(data, []byte("CDC:ErrVersionIncompatible")) { client.l().Debugf("cdc drain capture meet version incompatible, ignore it, target: %s, err: %+v", target, err) return data, nil } // cdc is not ready to accept request, return error to trigger retry. client.l().Debugf("cdc drain capture meet service unavailable, retry it, target: %s, err: %+v", target, err) return data, err default: } // match https://github.com/pingcap/tiflow/blob/e3d0d9d23b77c7884b70016ddbd8030ffeb95dfd/pkg/errors/cdc_errors.go#L55-L57 if bytes.Contains(data, []byte("CDC:ErrSchedulerRequestFailed")) { client.l().Debugf("cdc drain capture failed, data: %s, err: %+v", data, err) return data, nil } // match https://github.com/pingcap/tiflow/blob/e3d0d9d23b77c7884b70016ddbd8030ffeb95dfd/pkg/errors/cdc_errors.go#L51-L54 if bytes.Contains(data, []byte("CDC:ErrCaptureNotExist")) { client.l().Debugf("cdc drain capture failed, data: %s, err: %+v", data, err) return data, nil } client.l().Debugf("cdc drain capture failed, data: %s, statusCode: %d, err: %+v", data, statusCode, err) return data, err } return data, json.Unmarshal(data, &resp) }) return resp.CurrentTableCount, err } // DrainCapture request cdc owner move all tables on the target capture to other captures. 
func (c *CDCOpenAPIClient) DrainCapture(addr, target string, apiTimeoutSeconds int) error { if _, err := c.getCaptureByID(target); err != nil { c.l().Debugf("cdc drain capture failed, cannot find the capture, address: %s, target: %s, err: %+v", addr, target, err) return err } c.l().Infof("\t Start drain the capture, address: %s, captureID: %s", addr, target) start := time.Now() err := utils.Retry(func() error { count, err := drainCapture(c, target) if err != nil { return err } if count == 0 { return nil } c.l().Infof("\t Still waiting for %d tables to transfer...", count) return fmt.Errorf("drain capture not finished yet, target: %s, count: %d", target, count) }, utils.RetryOption{ Delay: 1 * time.Second, Timeout: time.Duration(apiTimeoutSeconds) * time.Second, }) c.l().Debugf("cdc drain capture finished, target: %s, elapsed: %+v", target, time.Since(start)) return err } // ResignOwner resign the cdc owner, and wait for a new owner be found // address is the current owner's address func (c *CDCOpenAPIClient) ResignOwner(address string) error { err := utils.Retry(func() error { return resignOwner(c, address) }, utils.RetryOption{ Delay: 2 * time.Second, Timeout: 10 * time.Second, }) return err } func resignOwner(c *CDCOpenAPIClient, addr string) error { api := "api/v1/owner/resign" endpoints := c.getEndpoints(api) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := c.client.PostWithStatusCode(c.ctx, endpoint, nil) if err != nil { if statusCode == http.StatusNotFound { c.l().Debugf("resign owner does not found, ignore it, err: %+v", err) return body, nil } return body, err } return body, nil }) if err != nil { return err } owner, err := c.GetOwner() if err != nil { return err } if owner.AdvertiseAddr == addr { return fmt.Errorf("old owner in power again, resign again, owner: %+v", owner) } c.l().Debugf("cdc resign owner successfully, and new owner found, owner: %+v", owner) return nil } // GetOwner return the cdc owner 
capture information func (c *CDCOpenAPIClient) GetOwner() (result *Capture, err error) { err = utils.Retry(func() error { captures, err := c.GetAllCaptures() if err != nil { return err } for _, capture := range captures { if capture.IsOwner { result = capture return nil } } return fmt.Errorf("no owner found") }, utils.RetryOption{ Delay: time.Second, Timeout: 10 * time.Second, }) return result, err } func (c *CDCOpenAPIClient) getCaptureByID(id string) (*Capture, error) { var result *Capture err := utils.Retry(func() error { captures, err := c.GetAllCaptures() if err != nil { return err } for _, capture := range captures { if capture.ID == id { result = capture return nil } } return fmt.Errorf("target capture not found") }, utils.RetryOption{ Delay: time.Second, Timeout: 10 * time.Second, }) return result, err } // GetCaptureByAddr return the capture information by the address func (c *CDCOpenAPIClient) GetCaptureByAddr(addr string) (result *Capture, err error) { captures, err := c.GetAllCaptures() if err != nil { return nil, err } for _, capture := range captures { if capture.AdvertiseAddr == addr { return capture, nil } } return nil, fmt.Errorf("capture not found, addr: %s", addr) } // GetAllCaptures return all captures instantaneously func (c *CDCOpenAPIClient) GetAllCaptures() ([]*Capture, error) { api := "api/v1/captures" endpoints := c.getEndpoints(api) var result []*Capture _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := c.client.GetWithStatusCode(c.ctx, endpoint) if err != nil { if statusCode == http.StatusNotFound { // old version cdc does not support open api, also the stopped cdc instance // return nil to trigger hard restart c.l().Debugf("get all captures not support, ignore it, err: %+v", err) return body, nil } return body, err } return body, json.Unmarshal(body, &result) }) return result, err } // IsCaptureAlive return error if the capture is not alive func (c *CDCOpenAPIClient) IsCaptureAlive() error { 
status, err := c.GetStatus() if err != nil { return err } if status.Liveness != LivenessCaptureAlive { return fmt.Errorf("capture is not alive, request url: %+v", c.urls[0]) } return nil } // GetStatus return the status of the TiCDC server. func (c *CDCOpenAPIClient) GetStatus() (result ServerStatus, err error) { api := "api/v1/status" // client should only have address to the target cdc server, not all cdc servers. endpoints := c.getEndpoints(api) err = utils.Retry(func() error { data, statusCode, err := c.client.GetWithStatusCode(c.ctx, endpoints[0]) if err != nil { if statusCode == http.StatusNotFound { c.l().Debugf("capture server status api not support, ignore it, err: %+v", err) return nil } err = json.Unmarshal(data, &result) if err != nil { return err } if result.Liveness == LivenessCaptureAlive { return nil } return errors.New("capture status is not alive, retry it") } return nil }, utils.RetryOption{ Timeout: 10 * time.Second, }) return result, err } // Healthy return true if the TiCDC cluster is healthy func (c *CDCOpenAPIClient) Healthy() error { err := utils.Retry(func() error { return isHealthy(c) }, utils.RetryOption{ Timeout: 10 * time.Second, }) return err } func isHealthy(client *CDCOpenAPIClient) error { api := "api/v1/health" endpoints := client.getEndpoints(api) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { _, statusCode, err := client.client.GetWithStatusCode(client.ctx, endpoint) if err != nil { switch statusCode { // It's likely the TiCDC does not support the API, return error to trigger hard restart. 
case http.StatusNotFound: client.l().Debugf("cdc check healthy does not support, ignore it") return nil, nil case http.StatusInternalServerError: client.l().Debugf("cdc check healthy: internal server error, retry it, err: %+v", err) } return nil, err } return nil, nil }) return err } func (c *CDCOpenAPIClient) l() *logprinter.Logger { return c.ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) } // Liveness is the liveness status of a capture. type Liveness int32 const ( // LivenessCaptureAlive means the capture is alive, and ready to serve. LivenessCaptureAlive Liveness = 0 // LivenessCaptureStopping means the capture is in the process of graceful shutdown. LivenessCaptureStopping Liveness = 1 ) // ServerStatus holds some common information of a TiCDC server type ServerStatus struct { Version string `json:"version"` GitHash string `json:"git_hash"` ID string `json:"id"` Pid int `json:"pid"` IsOwner bool `json:"is_owner"` Liveness Liveness `json:"liveness"` } // Capture holds common information of a capture in cdc type Capture struct { ID string `json:"id"` IsOwner bool `json:"is_owner"` AdvertiseAddr string `json:"address"` } // DrainCaptureRequest is request for manual `DrainCapture` type DrainCaptureRequest struct { CaptureID string `json:"capture_id"` } // DrainCaptureResp is response for manual `DrainCapture` type DrainCaptureResp struct { CurrentTableCount int `json:"current_table_count"` } tiup-1.16.3/pkg/cluster/api/dmapi.go000066400000000000000000000163571505422223000172260ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package api import ( "bytes" "context" "crypto/tls" "fmt" "strings" "time" "github.com/gogo/protobuf/jsonpb" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api/dmpb" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" ) var ( dmMembersURI = "apis/v1alpha1/members" defaultRetryOpt = &utils.RetryOption{ Delay: time.Second * 5, Timeout: time.Second * 60, } ) // DMMasterClient is an HTTP client of the dm-master server type DMMasterClient struct { addrs []string tlsEnabled bool httpClient *utils.HTTPClient } // NewDMMasterClient returns a new PDClient func NewDMMasterClient(addrs []string, timeout time.Duration, tlsConfig *tls.Config) *DMMasterClient { enableTLS := false if tlsConfig != nil { enableTLS = true } return &DMMasterClient{ addrs: addrs, tlsEnabled: enableTLS, httpClient: utils.NewHTTPClient(timeout, tlsConfig), } } // GetURL builds the the client URL of DMClient func (dm *DMMasterClient) GetURL(addr string) string { httpPrefix := "http" if dm.tlsEnabled { httpPrefix = "https" } return fmt.Sprintf("%s://%s", httpPrefix, addr) } func (dm *DMMasterClient) getEndpoints(cmd string) (endpoints []string) { for _, addr := range dm.addrs { endpoint := fmt.Sprintf("%s/%s", dm.GetURL(addr), cmd) endpoints = append(endpoints, endpoint) } return } func (dm *DMMasterClient) getMember(endpoints []string) (*dmpb.ListMemberResponse, error) { resp := &dmpb.ListMemberResponse{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := dm.httpClient.Get(context.TODO(), endpoint) if err != nil { return body, err } err = jsonpb.Unmarshal(strings.NewReader(string(body)), resp) if err != nil { return 
body, err } if !resp.Result { return body, errors.New("dm-master get members failed: " + resp.Msg) } return body, nil }) return resp, err } func (dm *DMMasterClient) deleteMember(endpoints []string) (*dmpb.OfflineMemberResponse, error) { resp := &dmpb.OfflineMemberResponse{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := dm.httpClient.Delete(context.TODO(), endpoint, nil) if statusCode == 404 || bytes.Contains(body, []byte("not exists")) { zap.L().Debug("member to offline does not exist, ignore.") return body, nil } if err != nil { return body, err } err = jsonpb.Unmarshal(strings.NewReader(string(body)), resp) if err != nil { return body, err } if !resp.Result { return body, errors.New("dm-master offline member failed: " + resp.Msg) } return body, nil }) return resp, err } // GetMaster returns the dm master leader // returns isFound, isActive, isLeader, error func (dm *DMMasterClient) GetMaster(name string) (isFound bool, isActive bool, isLeader bool, err error) { query := "?leader=true&master=true&names=" + name endpoints := dm.getEndpoints(dmMembersURI + query) memberResp, err := dm.getMember(endpoints) if err != nil { zap.L().Error("get dm master status failed", zap.Error(err)) return false, false, false, err } for _, member := range memberResp.GetMembers() { if leader := member.GetLeader(); leader != nil { if leader.GetName() == name { isFound = true isLeader = true } } else if masters := member.GetMaster(); masters != nil { for _, master := range masters.GetMasters() { if master.GetName() == name { isFound = true isActive = master.GetAlive() } } } } return } // GetWorker returns the dm worker status // returns (worker stage, error). 
If worker stage is "", that means this worker is in cluster func (dm *DMMasterClient) GetWorker(name string) (string, error) { query := "?worker=true&names=" + name endpoints := dm.getEndpoints(dmMembersURI + query) memberResp, err := dm.getMember(endpoints) if err != nil { zap.L().Error("get dm worker status failed", zap.Error(err)) return "", err } stage := "" for _, member := range memberResp.Members { if workers := member.GetWorker(); workers != nil { for _, worker := range workers.GetWorkers() { if worker.GetName() == name { stage = worker.GetStage() } } } } if len(stage) > 0 { stage = strings.ToUpper(stage[0:1]) + stage[1:] } return stage, nil } // GetLeader gets leader of dm cluster func (dm *DMMasterClient) GetLeader(retryOpt *utils.RetryOption) (string, error) { query := "?leader=true" endpoints := dm.getEndpoints(dmMembersURI + query) if retryOpt == nil { retryOpt = defaultRetryOpt } var ( memberResp *dmpb.ListMemberResponse err error ) if err := utils.Retry(func() error { memberResp, err = dm.getMember(endpoints) return err }, *retryOpt); err != nil { return "", err } leaderName := "" for _, member := range memberResp.Members { if leader := member.GetLeader(); leader != nil { leaderName = leader.GetName() } } return leaderName, nil } // GetRegisteredMembers gets all registerer members of dm cluster func (dm *DMMasterClient) GetRegisteredMembers() ([]string, []string, error) { query := "?master=true&worker=true" endpoints := dm.getEndpoints(dmMembersURI + query) memberResp, err := dm.getMember(endpoints) var ( registeredMasters []string registeredWorkers []string ) if err != nil { zap.L().Error("get dm master status failed", zap.Error(err)) return registeredMasters, registeredWorkers, err } for _, member := range memberResp.Members { if masters := member.GetMaster(); masters != nil { for _, master := range masters.GetMasters() { registeredMasters = append(registeredMasters, master.Name) } } else if workers := member.GetWorker(); workers != nil { for _, 
worker := range workers.GetWorkers() { registeredWorkers = append(registeredWorkers, worker.Name) } } } return registeredMasters, registeredWorkers, nil } // EvictDMMasterLeader evicts the dm master leader func (dm *DMMasterClient) EvictDMMasterLeader(retryOpt *utils.RetryOption) error { return nil } // OfflineMember offlines the member of dm cluster func (dm *DMMasterClient) OfflineMember(query string, retryOpt *utils.RetryOption) error { endpoints := dm.getEndpoints(dmMembersURI + query) if retryOpt == nil { retryOpt = defaultRetryOpt } if err := utils.Retry(func() error { _, err := dm.deleteMember(endpoints) return err }, *retryOpt); err != nil { return fmt.Errorf("error offline member %s, %v, %s", query, err, endpoints[0]) } return nil } // OfflineWorker offlines the dm worker func (dm *DMMasterClient) OfflineWorker(name string, retryOpt *utils.RetryOption) error { query := "/worker/" + name return dm.OfflineMember(query, retryOpt) } // OfflineMaster offlines the dm master func (dm *DMMasterClient) OfflineMaster(name string, retryOpt *utils.RetryOption) error { query := "/master/" + name return dm.OfflineMember(query, retryOpt) } tiup-1.16.3/pkg/cluster/api/dmpb/000077500000000000000000000000001505422223000165135ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/api/dmpb/README.md000066400000000000000000000003141505422223000177700ustar00rootroot00000000000000This package is imported from [TiFlow](https://github.com/pingcap/tiflow/tree/master/dm/pb). The package name has been updated with `sed -i 's/^package pb$/package dmpb/' dmmaster.pb.go dmworker.pb.go`. tiup-1.16.3/pkg/cluster/api/dmpb/dmmaster.pb.go000066400000000000000000015561751505422223000213020ustar00rootroot00000000000000// Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: dmmaster.proto package dmpb import ( context "context" fmt "fmt" proto "github.com/gogo/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" emptypb "google.golang.org/protobuf/types/known/emptypb" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type UnlockDDLLockOp int32 const ( UnlockDDLLockOp_InvalidLockOp UnlockDDLLockOp = 0 UnlockDDLLockOp_SkipLock UnlockDDLLockOp = 1 UnlockDDLLockOp_ExecLock UnlockDDLLockOp = 2 ) var UnlockDDLLockOp_name = map[int32]string{ 0: "InvalidLockOp", 1: "SkipLock", 2: "ExecLock", } var UnlockDDLLockOp_value = map[string]int32{ "InvalidLockOp": 0, "SkipLock": 1, "ExecLock": 2, } func (x UnlockDDLLockOp) String() string { return proto.EnumName(UnlockDDLLockOp_name, int32(x)) } func (UnlockDDLLockOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{0} } type SourceOp int32 const ( SourceOp_InvalidSourceOp SourceOp = 0 SourceOp_StartSource SourceOp = 1 SourceOp_UpdateSource SourceOp = 2 SourceOp_StopSource SourceOp = 3 SourceOp_ShowSource SourceOp = 4 ) var SourceOp_name = map[int32]string{ 0: "InvalidSourceOp", 1: "StartSource", 2: "UpdateSource", 3: "StopSource", 4: "ShowSource", } var SourceOp_value = map[string]int32{ "InvalidSourceOp": 0, "StartSource": 1, "UpdateSource": 2, "StopSource": 3, "ShowSource": 4, } func (x SourceOp) String() string { return proto.EnumName(SourceOp_name, int32(x)) } func (SourceOp) 
EnumDescriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{1} } type LeaderOp int32 const ( LeaderOp_InvalidLeaderOp LeaderOp = 0 LeaderOp_EvictLeaderOp LeaderOp = 1 LeaderOp_CancelEvictLeaderOp LeaderOp = 2 ) var LeaderOp_name = map[int32]string{ 0: "InvalidLeaderOp", 1: "EvictLeaderOp", 2: "CancelEvictLeaderOp", } var LeaderOp_value = map[string]int32{ "InvalidLeaderOp": 0, "EvictLeaderOp": 1, "CancelEvictLeaderOp": 2, } func (x LeaderOp) String() string { return proto.EnumName(LeaderOp_name, int32(x)) } func (LeaderOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{2} } type CfgType int32 const ( CfgType_InvalidType CfgType = 0 CfgType_TaskType CfgType = 1 CfgType_MasterType CfgType = 2 CfgType_WorkerType CfgType = 3 CfgType_SourceType CfgType = 4 CfgType_TaskTemplateType CfgType = 5 ) var CfgType_name = map[int32]string{ 0: "InvalidType", 1: "TaskType", 2: "MasterType", 3: "WorkerType", 4: "SourceType", 5: "TaskTemplateType", } var CfgType_value = map[string]int32{ "InvalidType": 0, "TaskType": 1, "MasterType": 2, "WorkerType": 3, "SourceType": 4, "TaskTemplateType": 5, } func (x CfgType) String() string { return proto.EnumName(CfgType_name, int32(x)) } func (CfgType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{3} } type RelayOpV2 int32 const ( RelayOpV2_InvalidRelayOpV2 RelayOpV2 = 0 RelayOpV2_StartRelayV2 RelayOpV2 = 1 RelayOpV2_StopRelayV2 RelayOpV2 = 2 ) var RelayOpV2_name = map[int32]string{ 0: "InvalidRelayOpV2", 1: "StartRelayV2", 2: "StopRelayV2", } var RelayOpV2_value = map[string]int32{ "InvalidRelayOpV2": 0, "StartRelayV2": 1, "StopRelayV2": 2, } func (x RelayOpV2) String() string { return proto.EnumName(RelayOpV2_name, int32(x)) } func (RelayOpV2) EnumDescriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{4} } type StartTaskRequest struct { Task string `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` Sources []string 
`protobuf:"bytes,2,rep,name=sources,proto3" json:"sources,omitempty"` RemoveMeta bool `protobuf:"varint,3,opt,name=removeMeta,proto3" json:"removeMeta,omitempty"` StartTime string `protobuf:"bytes,4,opt,name=startTime,proto3" json:"startTime,omitempty"` } func (m *StartTaskRequest) Reset() { *m = StartTaskRequest{} } func (m *StartTaskRequest) String() string { return proto.CompactTextString(m) } func (*StartTaskRequest) ProtoMessage() {} func (*StartTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{0} } func (m *StartTaskRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *StartTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_StartTaskRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *StartTaskRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StartTaskRequest.Merge(m, src) } func (m *StartTaskRequest) XXX_Size() int { return m.Size() } func (m *StartTaskRequest) XXX_DiscardUnknown() { xxx_messageInfo_StartTaskRequest.DiscardUnknown(m) } var xxx_messageInfo_StartTaskRequest proto.InternalMessageInfo func (m *StartTaskRequest) GetTask() string { if m != nil { return m.Task } return "" } func (m *StartTaskRequest) GetSources() []string { if m != nil { return m.Sources } return nil } func (m *StartTaskRequest) GetRemoveMeta() bool { if m != nil { return m.RemoveMeta } return false } func (m *StartTaskRequest) GetStartTime() string { if m != nil { return m.StartTime } return "" } type StartTaskResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` CheckResult string `protobuf:"bytes,4,opt,name=checkResult,proto3" 
json:"checkResult,omitempty"` } func (m *StartTaskResponse) Reset() { *m = StartTaskResponse{} } func (m *StartTaskResponse) String() string { return proto.CompactTextString(m) } func (*StartTaskResponse) ProtoMessage() {} func (*StartTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{1} } func (m *StartTaskResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *StartTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_StartTaskResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *StartTaskResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StartTaskResponse.Merge(m, src) } func (m *StartTaskResponse) XXX_Size() int { return m.Size() } func (m *StartTaskResponse) XXX_DiscardUnknown() { xxx_messageInfo_StartTaskResponse.DiscardUnknown(m) } var xxx_messageInfo_StartTaskResponse proto.InternalMessageInfo func (m *StartTaskResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *StartTaskResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *StartTaskResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } func (m *StartTaskResponse) GetCheckResult() string { if m != nil { return m.CheckResult } return "" } type OperateTaskRequest struct { Op TaskOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.TaskOp" json:"op,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Sources []string `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *OperateTaskRequest) Reset() { *m = OperateTaskRequest{} } func (m *OperateTaskRequest) String() string { return proto.CompactTextString(m) } func (*OperateTaskRequest) ProtoMessage() {} func (*OperateTaskRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor_f9bef11f2a341f03, []int{2} } func (m *OperateTaskRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateTaskRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateTaskRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateTaskRequest.Merge(m, src) } func (m *OperateTaskRequest) XXX_Size() int { return m.Size() } func (m *OperateTaskRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateTaskRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateTaskRequest proto.InternalMessageInfo func (m *OperateTaskRequest) GetOp() TaskOp { if m != nil { return m.Op } return TaskOp_InvalidOp } func (m *OperateTaskRequest) GetName() string { if m != nil { return m.Name } return "" } func (m *OperateTaskRequest) GetSources() []string { if m != nil { return m.Sources } return nil } type OperateTaskResponse struct { Op TaskOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.TaskOp" json:"op,omitempty"` Result bool `protobuf:"varint,2,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,3,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,4,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *OperateTaskResponse) Reset() { *m = OperateTaskResponse{} } func (m *OperateTaskResponse) String() string { return proto.CompactTextString(m) } func (*OperateTaskResponse) ProtoMessage() {} func (*OperateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{3} } func (m *OperateTaskResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateTaskResponse.Marshal(b, m, 
deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateTaskResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateTaskResponse.Merge(m, src) } func (m *OperateTaskResponse) XXX_Size() int { return m.Size() } func (m *OperateTaskResponse) XXX_DiscardUnknown() { xxx_messageInfo_OperateTaskResponse.DiscardUnknown(m) } var xxx_messageInfo_OperateTaskResponse proto.InternalMessageInfo func (m *OperateTaskResponse) GetOp() TaskOp { if m != nil { return m.Op } return TaskOp_InvalidOp } func (m *OperateTaskResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OperateTaskResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *OperateTaskResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } // UpdateTaskRequest used to update task after it has beed started // task: task's configuration, yaml format // // now, only support to update config for routes, filters, column-mappings, block-allow-list // support update partial config for syncer, loader, etc later // // sources need to do update, empty for all sources in processing the task type UpdateTaskRequest struct { Task string `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` Sources []string `protobuf:"bytes,2,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } func (m *UpdateTaskRequest) String() string { return proto.CompactTextString(m) } func (*UpdateTaskRequest) ProtoMessage() {} func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{4} } func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) } else { b 
= b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *UpdateTaskRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateTaskRequest.Merge(m, src) } func (m *UpdateTaskRequest) XXX_Size() int { return m.Size() } func (m *UpdateTaskRequest) XXX_DiscardUnknown() { xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) } var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo func (m *UpdateTaskRequest) GetTask() string { if m != nil { return m.Task } return "" } func (m *UpdateTaskRequest) GetSources() []string { if m != nil { return m.Sources } return nil } type UpdateTaskResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` CheckResult string `protobuf:"bytes,4,opt,name=checkResult,proto3" json:"checkResult,omitempty"` } func (m *UpdateTaskResponse) Reset() { *m = UpdateTaskResponse{} } func (m *UpdateTaskResponse) String() string { return proto.CompactTextString(m) } func (*UpdateTaskResponse) ProtoMessage() {} func (*UpdateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{5} } func (m *UpdateTaskResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UpdateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UpdateTaskResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *UpdateTaskResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateTaskResponse.Merge(m, src) } func (m *UpdateTaskResponse) XXX_Size() int { return m.Size() } func (m *UpdateTaskResponse) XXX_DiscardUnknown() { xxx_messageInfo_UpdateTaskResponse.DiscardUnknown(m) } var 
xxx_messageInfo_UpdateTaskResponse proto.InternalMessageInfo func (m *UpdateTaskResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *UpdateTaskResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *UpdateTaskResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } func (m *UpdateTaskResponse) GetCheckResult() string { if m != nil { return m.CheckResult } return "" } type QueryStatusListRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Sources []string `protobuf:"bytes,2,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *QueryStatusListRequest) Reset() { *m = QueryStatusListRequest{} } func (m *QueryStatusListRequest) String() string { return proto.CompactTextString(m) } func (*QueryStatusListRequest) ProtoMessage() {} func (*QueryStatusListRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{6} } func (m *QueryStatusListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *QueryStatusListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_QueryStatusListRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *QueryStatusListRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryStatusListRequest.Merge(m, src) } func (m *QueryStatusListRequest) XXX_Size() int { return m.Size() } func (m *QueryStatusListRequest) XXX_DiscardUnknown() { xxx_messageInfo_QueryStatusListRequest.DiscardUnknown(m) } var xxx_messageInfo_QueryStatusListRequest proto.InternalMessageInfo func (m *QueryStatusListRequest) GetName() string { if m != nil { return m.Name } return "" } func (m *QueryStatusListRequest) GetSources() []string { if m != nil { return m.Sources } return nil } type QueryStatusListResponse struct { Result bool 
`protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*QueryStatusResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *QueryStatusListResponse) Reset() { *m = QueryStatusListResponse{} } func (m *QueryStatusListResponse) String() string { return proto.CompactTextString(m) } func (*QueryStatusListResponse) ProtoMessage() {} func (*QueryStatusListResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{7} } func (m *QueryStatusListResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *QueryStatusListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_QueryStatusListResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *QueryStatusListResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryStatusListResponse.Merge(m, src) } func (m *QueryStatusListResponse) XXX_Size() int { return m.Size() } func (m *QueryStatusListResponse) XXX_DiscardUnknown() { xxx_messageInfo_QueryStatusListResponse.DiscardUnknown(m) } var xxx_messageInfo_QueryStatusListResponse proto.InternalMessageInfo func (m *QueryStatusListResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *QueryStatusListResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *QueryStatusListResponse) GetSources() []*QueryStatusResponse { if m != nil { return m.Sources } return nil } // ShowDDLLocksRequest used to query DDL locks which are un-resolved // task: task's name, empty for all tasks // sources: source need to query, empty for all sources // // any DDL lock in which the source is synced or unsynced will return // // if specify task and sources both, and sources not doing the task , it will return empty DDL 
locks type ShowDDLLocksRequest struct { Task string `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` Sources []string `protobuf:"bytes,2,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *ShowDDLLocksRequest) Reset() { *m = ShowDDLLocksRequest{} } func (m *ShowDDLLocksRequest) String() string { return proto.CompactTextString(m) } func (*ShowDDLLocksRequest) ProtoMessage() {} func (*ShowDDLLocksRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{8} } func (m *ShowDDLLocksRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ShowDDLLocksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ShowDDLLocksRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ShowDDLLocksRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ShowDDLLocksRequest.Merge(m, src) } func (m *ShowDDLLocksRequest) XXX_Size() int { return m.Size() } func (m *ShowDDLLocksRequest) XXX_DiscardUnknown() { xxx_messageInfo_ShowDDLLocksRequest.DiscardUnknown(m) } var xxx_messageInfo_ShowDDLLocksRequest proto.InternalMessageInfo func (m *ShowDDLLocksRequest) GetTask() string { if m != nil { return m.Task } return "" } func (m *ShowDDLLocksRequest) GetSources() []string { if m != nil { return m.Sources } return nil } // DDLLock represents a DDL lock info (I known the name confused with DDLLockInfo, any suggestion?) // it been sent from dm-master to dmctl // ID: DDL lock generated ID // task: lock's corresponding task name // mode: the shard DDL mode, `pessimistic` or `optimistic`. 
// owner: lock's owner, a dm-worker // DDL: DDL statement // synced: already synced dm-workers // unsynced: pending to sync dm-workers type DDLLock struct { ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` Task string `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"` Mode string `protobuf:"bytes,3,opt,name=mode,proto3" json:"mode,omitempty"` Owner string `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` DDLs []string `protobuf:"bytes,5,rep,name=DDLs,proto3" json:"DDLs,omitempty"` Synced []string `protobuf:"bytes,6,rep,name=synced,proto3" json:"synced,omitempty"` Unsynced []string `protobuf:"bytes,7,rep,name=unsynced,proto3" json:"unsynced,omitempty"` } func (m *DDLLock) Reset() { *m = DDLLock{} } func (m *DDLLock) String() string { return proto.CompactTextString(m) } func (*DDLLock) ProtoMessage() {} func (*DDLLock) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{9} } func (m *DDLLock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *DDLLock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_DDLLock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *DDLLock) XXX_Merge(src proto.Message) { xxx_messageInfo_DDLLock.Merge(m, src) } func (m *DDLLock) XXX_Size() int { return m.Size() } func (m *DDLLock) XXX_DiscardUnknown() { xxx_messageInfo_DDLLock.DiscardUnknown(m) } var xxx_messageInfo_DDLLock proto.InternalMessageInfo func (m *DDLLock) GetID() string { if m != nil { return m.ID } return "" } func (m *DDLLock) GetTask() string { if m != nil { return m.Task } return "" } func (m *DDLLock) GetMode() string { if m != nil { return m.Mode } return "" } func (m *DDLLock) GetOwner() string { if m != nil { return m.Owner } return "" } func (m *DDLLock) GetDDLs() []string { if m != nil { return m.DDLs } return nil } func (m 
*DDLLock) GetSynced() []string { if m != nil { return m.Synced } return nil } func (m *DDLLock) GetUnsynced() []string { if m != nil { return m.Unsynced } return nil } type ShowDDLLocksResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Locks []*DDLLock `protobuf:"bytes,3,rep,name=locks,proto3" json:"locks,omitempty"` } func (m *ShowDDLLocksResponse) Reset() { *m = ShowDDLLocksResponse{} } func (m *ShowDDLLocksResponse) String() string { return proto.CompactTextString(m) } func (*ShowDDLLocksResponse) ProtoMessage() {} func (*ShowDDLLocksResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{10} } func (m *ShowDDLLocksResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ShowDDLLocksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ShowDDLLocksResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ShowDDLLocksResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ShowDDLLocksResponse.Merge(m, src) } func (m *ShowDDLLocksResponse) XXX_Size() int { return m.Size() } func (m *ShowDDLLocksResponse) XXX_DiscardUnknown() { xxx_messageInfo_ShowDDLLocksResponse.DiscardUnknown(m) } var xxx_messageInfo_ShowDDLLocksResponse proto.InternalMessageInfo func (m *ShowDDLLocksResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *ShowDDLLocksResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *ShowDDLLocksResponse) GetLocks() []*DDLLock { if m != nil { return m.Locks } return nil } // UnlockDDLLockRequest used to unlock (resolve) DDL lock manually // ID: DDL lock ID // replaceOwner: dm-worker used to replace the original DDL lock's owner // forceRemove: force to remove the DDL lock even 
fail to execute the DDL for the owner. type UnlockDDLLockRequest struct { ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` ReplaceOwner string `protobuf:"bytes,2,opt,name=replaceOwner,proto3" json:"replaceOwner,omitempty"` ForceRemove bool `protobuf:"varint,3,opt,name=forceRemove,proto3" json:"forceRemove,omitempty"` Op UnlockDDLLockOp `protobuf:"varint,4,opt,name=op,proto3,enum=pb.UnlockDDLLockOp" json:"op,omitempty"` Sources []string `protobuf:"bytes,5,rep,name=sources,proto3" json:"sources,omitempty"` Database string `protobuf:"bytes,6,opt,name=database,proto3" json:"database,omitempty"` Table string `protobuf:"bytes,7,opt,name=table,proto3" json:"table,omitempty"` } func (m *UnlockDDLLockRequest) Reset() { *m = UnlockDDLLockRequest{} } func (m *UnlockDDLLockRequest) String() string { return proto.CompactTextString(m) } func (*UnlockDDLLockRequest) ProtoMessage() {} func (*UnlockDDLLockRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{11} } func (m *UnlockDDLLockRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UnlockDDLLockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UnlockDDLLockRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *UnlockDDLLockRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UnlockDDLLockRequest.Merge(m, src) } func (m *UnlockDDLLockRequest) XXX_Size() int { return m.Size() } func (m *UnlockDDLLockRequest) XXX_DiscardUnknown() { xxx_messageInfo_UnlockDDLLockRequest.DiscardUnknown(m) } var xxx_messageInfo_UnlockDDLLockRequest proto.InternalMessageInfo func (m *UnlockDDLLockRequest) GetID() string { if m != nil { return m.ID } return "" } func (m *UnlockDDLLockRequest) GetReplaceOwner() string { if m != nil { return m.ReplaceOwner } return "" } func (m *UnlockDDLLockRequest) 
GetForceRemove() bool { if m != nil { return m.ForceRemove } return false } func (m *UnlockDDLLockRequest) GetOp() UnlockDDLLockOp { if m != nil { return m.Op } return UnlockDDLLockOp_InvalidLockOp } func (m *UnlockDDLLockRequest) GetSources() []string { if m != nil { return m.Sources } return nil } func (m *UnlockDDLLockRequest) GetDatabase() string { if m != nil { return m.Database } return "" } func (m *UnlockDDLLockRequest) GetTable() string { if m != nil { return m.Table } return "" } type UnlockDDLLockResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *UnlockDDLLockResponse) Reset() { *m = UnlockDDLLockResponse{} } func (m *UnlockDDLLockResponse) String() string { return proto.CompactTextString(m) } func (*UnlockDDLLockResponse) ProtoMessage() {} func (*UnlockDDLLockResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{12} } func (m *UnlockDDLLockResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UnlockDDLLockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UnlockDDLLockResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *UnlockDDLLockResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UnlockDDLLockResponse.Merge(m, src) } func (m *UnlockDDLLockResponse) XXX_Size() int { return m.Size() } func (m *UnlockDDLLockResponse) XXX_DiscardUnknown() { xxx_messageInfo_UnlockDDLLockResponse.DiscardUnknown(m) } var xxx_messageInfo_UnlockDDLLockResponse proto.InternalMessageInfo func (m *UnlockDDLLockResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *UnlockDDLLockResponse) GetMsg() string { if m != nil { return m.Msg } return "" } // OperateWorkerRelayRequest represents a request 
for some dm-workers to operate relay unit type OperateWorkerRelayRequest struct { Op RelayOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.RelayOp" json:"op,omitempty"` Sources []string `protobuf:"bytes,2,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *OperateWorkerRelayRequest) Reset() { *m = OperateWorkerRelayRequest{} } func (m *OperateWorkerRelayRequest) String() string { return proto.CompactTextString(m) } func (*OperateWorkerRelayRequest) ProtoMessage() {} func (*OperateWorkerRelayRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{13} } func (m *OperateWorkerRelayRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateWorkerRelayRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateWorkerRelayRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateWorkerRelayRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateWorkerRelayRequest.Merge(m, src) } func (m *OperateWorkerRelayRequest) XXX_Size() int { return m.Size() } func (m *OperateWorkerRelayRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateWorkerRelayRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateWorkerRelayRequest proto.InternalMessageInfo func (m *OperateWorkerRelayRequest) GetOp() RelayOp { if m != nil { return m.Op } return RelayOp_InvalidRelayOp } func (m *OperateWorkerRelayRequest) GetSources() []string { if m != nil { return m.Sources } return nil } type OperateWorkerRelayResponse struct { Op RelayOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.RelayOp" json:"op,omitempty"` Result bool `protobuf:"varint,2,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,3,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,4,rep,name=sources,proto3" json:"sources,omitempty"` } 
func (m *OperateWorkerRelayResponse) Reset() { *m = OperateWorkerRelayResponse{} } func (m *OperateWorkerRelayResponse) String() string { return proto.CompactTextString(m) } func (*OperateWorkerRelayResponse) ProtoMessage() {} func (*OperateWorkerRelayResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{14} } func (m *OperateWorkerRelayResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateWorkerRelayResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateWorkerRelayResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateWorkerRelayResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateWorkerRelayResponse.Merge(m, src) } func (m *OperateWorkerRelayResponse) XXX_Size() int { return m.Size() } func (m *OperateWorkerRelayResponse) XXX_DiscardUnknown() { xxx_messageInfo_OperateWorkerRelayResponse.DiscardUnknown(m) } var xxx_messageInfo_OperateWorkerRelayResponse proto.InternalMessageInfo func (m *OperateWorkerRelayResponse) GetOp() RelayOp { if m != nil { return m.Op } return RelayOp_InvalidRelayOp } func (m *OperateWorkerRelayResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OperateWorkerRelayResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *OperateWorkerRelayResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } // PurgeWorkerRelayRequest represents a request to purge relay log files for some dm-workers // workers: dm-workers need to purge relay log files // inactive: whether purge inactive relay log files // time: whether purge relay log files before this time, the number of seconds elapsed since January 1, 1970 UTC // filename: whether purge relay log files before this filename // subDir: specify relay sub directory for 
@filename type PurgeWorkerRelayRequest struct { Sources []string `protobuf:"bytes,1,rep,name=sources,proto3" json:"sources,omitempty"` Inactive bool `protobuf:"varint,2,opt,name=inactive,proto3" json:"inactive,omitempty"` Time int64 `protobuf:"varint,3,opt,name=time,proto3" json:"time,omitempty"` Filename string `protobuf:"bytes,4,opt,name=filename,proto3" json:"filename,omitempty"` SubDir string `protobuf:"bytes,5,opt,name=subDir,proto3" json:"subDir,omitempty"` } func (m *PurgeWorkerRelayRequest) Reset() { *m = PurgeWorkerRelayRequest{} } func (m *PurgeWorkerRelayRequest) String() string { return proto.CompactTextString(m) } func (*PurgeWorkerRelayRequest) ProtoMessage() {} func (*PurgeWorkerRelayRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{15} } func (m *PurgeWorkerRelayRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PurgeWorkerRelayRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PurgeWorkerRelayRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *PurgeWorkerRelayRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_PurgeWorkerRelayRequest.Merge(m, src) } func (m *PurgeWorkerRelayRequest) XXX_Size() int { return m.Size() } func (m *PurgeWorkerRelayRequest) XXX_DiscardUnknown() { xxx_messageInfo_PurgeWorkerRelayRequest.DiscardUnknown(m) } var xxx_messageInfo_PurgeWorkerRelayRequest proto.InternalMessageInfo func (m *PurgeWorkerRelayRequest) GetSources() []string { if m != nil { return m.Sources } return nil } func (m *PurgeWorkerRelayRequest) GetInactive() bool { if m != nil { return m.Inactive } return false } func (m *PurgeWorkerRelayRequest) GetTime() int64 { if m != nil { return m.Time } return 0 } func (m *PurgeWorkerRelayRequest) GetFilename() string { if m != nil { return m.Filename } return "" } func (m 
*PurgeWorkerRelayRequest) GetSubDir() string { if m != nil { return m.SubDir } return "" } type PurgeWorkerRelayResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *PurgeWorkerRelayResponse) Reset() { *m = PurgeWorkerRelayResponse{} } func (m *PurgeWorkerRelayResponse) String() string { return proto.CompactTextString(m) } func (*PurgeWorkerRelayResponse) ProtoMessage() {} func (*PurgeWorkerRelayResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{16} } func (m *PurgeWorkerRelayResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *PurgeWorkerRelayResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_PurgeWorkerRelayResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *PurgeWorkerRelayResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_PurgeWorkerRelayResponse.Merge(m, src) } func (m *PurgeWorkerRelayResponse) XXX_Size() int { return m.Size() } func (m *PurgeWorkerRelayResponse) XXX_DiscardUnknown() { xxx_messageInfo_PurgeWorkerRelayResponse.DiscardUnknown(m) } var xxx_messageInfo_PurgeWorkerRelayResponse proto.InternalMessageInfo func (m *PurgeWorkerRelayResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *PurgeWorkerRelayResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *PurgeWorkerRelayResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type CheckTaskRequest struct { Task string `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` ErrCnt int64 `protobuf:"varint,2,opt,name=errCnt,proto3" 
json:"errCnt,omitempty"` WarnCnt int64 `protobuf:"varint,3,opt,name=warnCnt,proto3" json:"warnCnt,omitempty"` StartTime string `protobuf:"bytes,4,opt,name=startTime,proto3" json:"startTime,omitempty"` } func (m *CheckTaskRequest) Reset() { *m = CheckTaskRequest{} } func (m *CheckTaskRequest) String() string { return proto.CompactTextString(m) } func (*CheckTaskRequest) ProtoMessage() {} func (*CheckTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{17} } func (m *CheckTaskRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckTaskRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *CheckTaskRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckTaskRequest.Merge(m, src) } func (m *CheckTaskRequest) XXX_Size() int { return m.Size() } func (m *CheckTaskRequest) XXX_DiscardUnknown() { xxx_messageInfo_CheckTaskRequest.DiscardUnknown(m) } var xxx_messageInfo_CheckTaskRequest proto.InternalMessageInfo func (m *CheckTaskRequest) GetTask() string { if m != nil { return m.Task } return "" } func (m *CheckTaskRequest) GetErrCnt() int64 { if m != nil { return m.ErrCnt } return 0 } func (m *CheckTaskRequest) GetWarnCnt() int64 { if m != nil { return m.WarnCnt } return 0 } func (m *CheckTaskRequest) GetStartTime() string { if m != nil { return m.StartTime } return "" } type CheckTaskResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *CheckTaskResponse) Reset() { *m = CheckTaskResponse{} } func (m *CheckTaskResponse) String() string { return proto.CompactTextString(m) } func (*CheckTaskResponse) ProtoMessage() {} func (*CheckTaskResponse) Descriptor() 
([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{18} } func (m *CheckTaskResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckTaskResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *CheckTaskResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckTaskResponse.Merge(m, src) } func (m *CheckTaskResponse) XXX_Size() int { return m.Size() } func (m *CheckTaskResponse) XXX_DiscardUnknown() { xxx_messageInfo_CheckTaskResponse.DiscardUnknown(m) } var xxx_messageInfo_CheckTaskResponse proto.InternalMessageInfo func (m *CheckTaskResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *CheckTaskResponse) GetMsg() string { if m != nil { return m.Msg } return "" } type OperateSourceRequest struct { Op SourceOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.SourceOp" json:"op,omitempty"` Config []string `protobuf:"bytes,2,rep,name=config,proto3" json:"config,omitempty"` SourceID []string `protobuf:"bytes,3,rep,name=sourceID,proto3" json:"sourceID,omitempty"` WorkerName string `protobuf:"bytes,4,opt,name=workerName,proto3" json:"workerName,omitempty"` } func (m *OperateSourceRequest) Reset() { *m = OperateSourceRequest{} } func (m *OperateSourceRequest) String() string { return proto.CompactTextString(m) } func (*OperateSourceRequest) ProtoMessage() {} func (*OperateSourceRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{19} } func (m *OperateSourceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateSourceRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := 
m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateSourceRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateSourceRequest.Merge(m, src) } func (m *OperateSourceRequest) XXX_Size() int { return m.Size() } func (m *OperateSourceRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateSourceRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateSourceRequest proto.InternalMessageInfo func (m *OperateSourceRequest) GetOp() SourceOp { if m != nil { return m.Op } return SourceOp_InvalidSourceOp } func (m *OperateSourceRequest) GetConfig() []string { if m != nil { return m.Config } return nil } func (m *OperateSourceRequest) GetSourceID() []string { if m != nil { return m.SourceID } return nil } func (m *OperateSourceRequest) GetWorkerName() string { if m != nil { return m.WorkerName } return "" } type OperateSourceResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *OperateSourceResponse) Reset() { *m = OperateSourceResponse{} } func (m *OperateSourceResponse) String() string { return proto.CompactTextString(m) } func (*OperateSourceResponse) ProtoMessage() {} func (*OperateSourceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{20} } func (m *OperateSourceResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateSourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateSourceResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateSourceResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateSourceResponse.Merge(m, src) } func (m 
*OperateSourceResponse) XXX_Size() int { return m.Size() } func (m *OperateSourceResponse) XXX_DiscardUnknown() { xxx_messageInfo_OperateSourceResponse.DiscardUnknown(m) } var xxx_messageInfo_OperateSourceResponse proto.InternalMessageInfo func (m *OperateSourceResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OperateSourceResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *OperateSourceResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type RegisterWorkerRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` } func (m *RegisterWorkerRequest) Reset() { *m = RegisterWorkerRequest{} } func (m *RegisterWorkerRequest) String() string { return proto.CompactTextString(m) } func (*RegisterWorkerRequest) ProtoMessage() {} func (*RegisterWorkerRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{21} } func (m *RegisterWorkerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RegisterWorkerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RegisterWorkerRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *RegisterWorkerRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_RegisterWorkerRequest.Merge(m, src) } func (m *RegisterWorkerRequest) XXX_Size() int { return m.Size() } func (m *RegisterWorkerRequest) XXX_DiscardUnknown() { xxx_messageInfo_RegisterWorkerRequest.DiscardUnknown(m) } var xxx_messageInfo_RegisterWorkerRequest proto.InternalMessageInfo func (m *RegisterWorkerRequest) GetName() string { if m != nil { return m.Name } return "" } func (m *RegisterWorkerRequest) GetAddress() string { if m != nil { return m.Address } 
return "" } type RegisterWorkerResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` SecretKey []byte `protobuf:"bytes,3,opt,name=secretKey,proto3" json:"secretKey,omitempty"` } func (m *RegisterWorkerResponse) Reset() { *m = RegisterWorkerResponse{} } func (m *RegisterWorkerResponse) String() string { return proto.CompactTextString(m) } func (*RegisterWorkerResponse) ProtoMessage() {} func (*RegisterWorkerResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{22} } func (m *RegisterWorkerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RegisterWorkerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RegisterWorkerResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *RegisterWorkerResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_RegisterWorkerResponse.Merge(m, src) } func (m *RegisterWorkerResponse) XXX_Size() int { return m.Size() } func (m *RegisterWorkerResponse) XXX_DiscardUnknown() { xxx_messageInfo_RegisterWorkerResponse.DiscardUnknown(m) } var xxx_messageInfo_RegisterWorkerResponse proto.InternalMessageInfo func (m *RegisterWorkerResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *RegisterWorkerResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *RegisterWorkerResponse) GetSecretKey() []byte { if m != nil { return m.SecretKey } return nil } type OfflineMemberRequest struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` } func (m *OfflineMemberRequest) Reset() { *m = OfflineMemberRequest{} } func (m *OfflineMemberRequest) String() string { return 
proto.CompactTextString(m) } func (*OfflineMemberRequest) ProtoMessage() {} func (*OfflineMemberRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{23} } func (m *OfflineMemberRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OfflineMemberRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OfflineMemberRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OfflineMemberRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OfflineMemberRequest.Merge(m, src) } func (m *OfflineMemberRequest) XXX_Size() int { return m.Size() } func (m *OfflineMemberRequest) XXX_DiscardUnknown() { xxx_messageInfo_OfflineMemberRequest.DiscardUnknown(m) } var xxx_messageInfo_OfflineMemberRequest proto.InternalMessageInfo func (m *OfflineMemberRequest) GetType() string { if m != nil { return m.Type } return "" } func (m *OfflineMemberRequest) GetName() string { if m != nil { return m.Name } return "" } type OfflineMemberResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *OfflineMemberResponse) Reset() { *m = OfflineMemberResponse{} } func (m *OfflineMemberResponse) String() string { return proto.CompactTextString(m) } func (*OfflineMemberResponse) ProtoMessage() {} func (*OfflineMemberResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{24} } func (m *OfflineMemberResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OfflineMemberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OfflineMemberResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } 
return b[:n], nil } } func (m *OfflineMemberResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OfflineMemberResponse.Merge(m, src) } func (m *OfflineMemberResponse) XXX_Size() int { return m.Size() } func (m *OfflineMemberResponse) XXX_DiscardUnknown() { xxx_messageInfo_OfflineMemberResponse.DiscardUnknown(m) } var xxx_messageInfo_OfflineMemberResponse proto.InternalMessageInfo func (m *OfflineMemberResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OfflineMemberResponse) GetMsg() string { if m != nil { return m.Msg } return "" } type OperateLeaderRequest struct { Op LeaderOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.LeaderOp" json:"op,omitempty"` } func (m *OperateLeaderRequest) Reset() { *m = OperateLeaderRequest{} } func (m *OperateLeaderRequest) String() string { return proto.CompactTextString(m) } func (*OperateLeaderRequest) ProtoMessage() {} func (*OperateLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{25} } func (m *OperateLeaderRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateLeaderRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateLeaderRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateLeaderRequest.Merge(m, src) } func (m *OperateLeaderRequest) XXX_Size() int { return m.Size() } func (m *OperateLeaderRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateLeaderRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateLeaderRequest proto.InternalMessageInfo func (m *OperateLeaderRequest) GetOp() LeaderOp { if m != nil { return m.Op } return LeaderOp_InvalidLeaderOp } type OperateLeaderResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg 
string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *OperateLeaderResponse) Reset() { *m = OperateLeaderResponse{} } func (m *OperateLeaderResponse) String() string { return proto.CompactTextString(m) } func (*OperateLeaderResponse) ProtoMessage() {} func (*OperateLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{26} } func (m *OperateLeaderResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateLeaderResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateLeaderResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateLeaderResponse.Merge(m, src) } func (m *OperateLeaderResponse) XXX_Size() int { return m.Size() } func (m *OperateLeaderResponse) XXX_DiscardUnknown() { xxx_messageInfo_OperateLeaderResponse.DiscardUnknown(m) } var xxx_messageInfo_OperateLeaderResponse proto.InternalMessageInfo func (m *OperateLeaderResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OperateLeaderResponse) GetMsg() string { if m != nil { return m.Msg } return "" } type MasterInfo struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` Alive bool `protobuf:"varint,3,opt,name=alive,proto3" json:"alive,omitempty"` PeerURLs []string `protobuf:"bytes,4,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` ClientURLs []string `protobuf:"bytes,5,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"` } func (m *MasterInfo) Reset() { *m = MasterInfo{} } func (m *MasterInfo) String() string { return proto.CompactTextString(m) } func (*MasterInfo) ProtoMessage() {} func (*MasterInfo) Descriptor() ([]byte, 
[]int) { return fileDescriptor_f9bef11f2a341f03, []int{27} } func (m *MasterInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *MasterInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_MasterInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *MasterInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_MasterInfo.Merge(m, src) } func (m *MasterInfo) XXX_Size() int { return m.Size() } func (m *MasterInfo) XXX_DiscardUnknown() { xxx_messageInfo_MasterInfo.DiscardUnknown(m) } var xxx_messageInfo_MasterInfo proto.InternalMessageInfo func (m *MasterInfo) GetName() string { if m != nil { return m.Name } return "" } func (m *MasterInfo) GetMemberID() uint64 { if m != nil { return m.MemberID } return 0 } func (m *MasterInfo) GetAlive() bool { if m != nil { return m.Alive } return false } func (m *MasterInfo) GetPeerURLs() []string { if m != nil { return m.PeerURLs } return nil } func (m *MasterInfo) GetClientURLs() []string { if m != nil { return m.ClientURLs } return nil } type WorkerInfo struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` Stage string `protobuf:"bytes,3,opt,name=stage,proto3" json:"stage,omitempty"` Source string `protobuf:"bytes,4,opt,name=source,proto3" json:"source,omitempty"` } func (m *WorkerInfo) Reset() { *m = WorkerInfo{} } func (m *WorkerInfo) String() string { return proto.CompactTextString(m) } func (*WorkerInfo) ProtoMessage() {} func (*WorkerInfo) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{28} } func (m *WorkerInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *WorkerInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_WorkerInfo.Marshal(b, m, 
deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *WorkerInfo) XXX_Merge(src proto.Message) { xxx_messageInfo_WorkerInfo.Merge(m, src) } func (m *WorkerInfo) XXX_Size() int { return m.Size() } func (m *WorkerInfo) XXX_DiscardUnknown() { xxx_messageInfo_WorkerInfo.DiscardUnknown(m) } var xxx_messageInfo_WorkerInfo proto.InternalMessageInfo func (m *WorkerInfo) GetName() string { if m != nil { return m.Name } return "" } func (m *WorkerInfo) GetAddr() string { if m != nil { return m.Addr } return "" } func (m *WorkerInfo) GetStage() string { if m != nil { return m.Stage } return "" } func (m *WorkerInfo) GetSource() string { if m != nil { return m.Source } return "" } type ListLeaderMember struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"` } func (m *ListLeaderMember) Reset() { *m = ListLeaderMember{} } func (m *ListLeaderMember) String() string { return proto.CompactTextString(m) } func (*ListLeaderMember) ProtoMessage() {} func (*ListLeaderMember) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{29} } func (m *ListLeaderMember) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ListLeaderMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ListLeaderMember.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ListLeaderMember) XXX_Merge(src proto.Message) { xxx_messageInfo_ListLeaderMember.Merge(m, src) } func (m *ListLeaderMember) XXX_Size() int { return m.Size() } func (m *ListLeaderMember) XXX_DiscardUnknown() { xxx_messageInfo_ListLeaderMember.DiscardUnknown(m) } var xxx_messageInfo_ListLeaderMember 
proto.InternalMessageInfo func (m *ListLeaderMember) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *ListLeaderMember) GetName() string { if m != nil { return m.Name } return "" } func (m *ListLeaderMember) GetAddr() string { if m != nil { return m.Addr } return "" } type ListMasterMember struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` Masters []*MasterInfo `protobuf:"bytes,2,rep,name=masters,proto3" json:"masters,omitempty"` } func (m *ListMasterMember) Reset() { *m = ListMasterMember{} } func (m *ListMasterMember) String() string { return proto.CompactTextString(m) } func (*ListMasterMember) ProtoMessage() {} func (*ListMasterMember) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{30} } func (m *ListMasterMember) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ListMasterMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ListMasterMember.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ListMasterMember) XXX_Merge(src proto.Message) { xxx_messageInfo_ListMasterMember.Merge(m, src) } func (m *ListMasterMember) XXX_Size() int { return m.Size() } func (m *ListMasterMember) XXX_DiscardUnknown() { xxx_messageInfo_ListMasterMember.DiscardUnknown(m) } var xxx_messageInfo_ListMasterMember proto.InternalMessageInfo func (m *ListMasterMember) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *ListMasterMember) GetMasters() []*MasterInfo { if m != nil { return m.Masters } return nil } type ListWorkerMember struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` Workers []*WorkerInfo `protobuf:"bytes,2,rep,name=workers,proto3" json:"workers,omitempty"` } func (m *ListWorkerMember) Reset() { *m = ListWorkerMember{} } func (m *ListWorkerMember) String() string { return 
proto.CompactTextString(m) } func (*ListWorkerMember) ProtoMessage() {} func (*ListWorkerMember) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{31} } func (m *ListWorkerMember) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ListWorkerMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ListWorkerMember.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ListWorkerMember) XXX_Merge(src proto.Message) { xxx_messageInfo_ListWorkerMember.Merge(m, src) } func (m *ListWorkerMember) XXX_Size() int { return m.Size() } func (m *ListWorkerMember) XXX_DiscardUnknown() { xxx_messageInfo_ListWorkerMember.DiscardUnknown(m) } var xxx_messageInfo_ListWorkerMember proto.InternalMessageInfo func (m *ListWorkerMember) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *ListWorkerMember) GetWorkers() []*WorkerInfo { if m != nil { return m.Workers } return nil } type Members struct { // Types that are valid to be assigned to Member: // // *Members_Leader // *Members_Master // *Members_Worker Member isMembers_Member `protobuf_oneof:"member"` } func (m *Members) Reset() { *m = Members{} } func (m *Members) String() string { return proto.CompactTextString(m) } func (*Members) ProtoMessage() {} func (*Members) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{32} } func (m *Members) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Members) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Members.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *Members) XXX_Merge(src proto.Message) { xxx_messageInfo_Members.Merge(m, src) } func (m *Members) XXX_Size() int { return m.Size() } 
func (m *Members) XXX_DiscardUnknown() { xxx_messageInfo_Members.DiscardUnknown(m) } var xxx_messageInfo_Members proto.InternalMessageInfo type isMembers_Member interface { isMembers_Member() MarshalTo([]byte) (int, error) Size() int } type Members_Leader struct { Leader *ListLeaderMember `protobuf:"bytes,1,opt,name=leader,proto3,oneof" json:"leader,omitempty"` } type Members_Master struct { Master *ListMasterMember `protobuf:"bytes,2,opt,name=master,proto3,oneof" json:"master,omitempty"` } type Members_Worker struct { Worker *ListWorkerMember `protobuf:"bytes,3,opt,name=worker,proto3,oneof" json:"worker,omitempty"` } func (*Members_Leader) isMembers_Member() {} func (*Members_Master) isMembers_Member() {} func (*Members_Worker) isMembers_Member() {} func (m *Members) GetMember() isMembers_Member { if m != nil { return m.Member } return nil } func (m *Members) GetLeader() *ListLeaderMember { if x, ok := m.GetMember().(*Members_Leader); ok { return x.Leader } return nil } func (m *Members) GetMaster() *ListMasterMember { if x, ok := m.GetMember().(*Members_Master); ok { return x.Master } return nil } func (m *Members) GetWorker() *ListWorkerMember { if x, ok := m.GetMember().(*Members_Worker); ok { return x.Worker } return nil } // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Members) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Members_Leader)(nil), (*Members_Master)(nil), (*Members_Worker)(nil), } } type ListMemberRequest struct { Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"` Master bool `protobuf:"varint,2,opt,name=master,proto3" json:"master,omitempty"` Worker bool `protobuf:"varint,3,opt,name=worker,proto3" json:"worker,omitempty"` Names []string `protobuf:"bytes,4,rep,name=names,proto3" json:"names,omitempty"` } func (m *ListMemberRequest) Reset() { *m = ListMemberRequest{} } func (m *ListMemberRequest) String() string { return proto.CompactTextString(m) } func (*ListMemberRequest) ProtoMessage() {} func (*ListMemberRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{33} } func (m *ListMemberRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ListMemberRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ListMemberRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ListMemberRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_ListMemberRequest.Merge(m, src) } func (m *ListMemberRequest) XXX_Size() int { return m.Size() } func (m *ListMemberRequest) XXX_DiscardUnknown() { xxx_messageInfo_ListMemberRequest.DiscardUnknown(m) } var xxx_messageInfo_ListMemberRequest proto.InternalMessageInfo func (m *ListMemberRequest) GetLeader() bool { if m != nil { return m.Leader } return false } func (m *ListMemberRequest) GetMaster() bool { if m != nil { return m.Master } return false } func (m *ListMemberRequest) GetWorker() bool { if m != nil { return m.Worker } return false } func (m *ListMemberRequest) GetNames() []string { if m != nil { return m.Names } return nil } type ListMemberResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" 
json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Members []*Members `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` } func (m *ListMemberResponse) Reset() { *m = ListMemberResponse{} } func (m *ListMemberResponse) String() string { return proto.CompactTextString(m) } func (*ListMemberResponse) ProtoMessage() {} func (*ListMemberResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{34} } func (m *ListMemberResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ListMemberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ListMemberResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ListMemberResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ListMemberResponse.Merge(m, src) } func (m *ListMemberResponse) XXX_Size() int { return m.Size() } func (m *ListMemberResponse) XXX_DiscardUnknown() { xxx_messageInfo_ListMemberResponse.DiscardUnknown(m) } var xxx_messageInfo_ListMemberResponse proto.InternalMessageInfo func (m *ListMemberResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *ListMemberResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *ListMemberResponse) GetMembers() []*Members { if m != nil { return m.Members } return nil } type OperateSchemaRequest struct { Op SchemaOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.SchemaOp" json:"op,omitempty"` Task string `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"` Sources []string `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` Database string `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"` Table string `protobuf:"bytes,5,opt,name=table,proto3" json:"table,omitempty"` Schema string 
`protobuf:"bytes,6,opt,name=schema,proto3" json:"schema,omitempty"` Flush bool `protobuf:"varint,7,opt,name=flush,proto3" json:"flush,omitempty"` Sync bool `protobuf:"varint,8,opt,name=sync,proto3" json:"sync,omitempty"` FromSource bool `protobuf:"varint,9,opt,name=fromSource,proto3" json:"fromSource,omitempty"` FromTarget bool `protobuf:"varint,10,opt,name=fromTarget,proto3" json:"fromTarget,omitempty"` } func (m *OperateSchemaRequest) Reset() { *m = OperateSchemaRequest{} } func (m *OperateSchemaRequest) String() string { return proto.CompactTextString(m) } func (*OperateSchemaRequest) ProtoMessage() {} func (*OperateSchemaRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{35} } func (m *OperateSchemaRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateSchemaRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateSchemaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateSchemaRequest.Merge(m, src) } func (m *OperateSchemaRequest) XXX_Size() int { return m.Size() } func (m *OperateSchemaRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateSchemaRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateSchemaRequest proto.InternalMessageInfo func (m *OperateSchemaRequest) GetOp() SchemaOp { if m != nil { return m.Op } return SchemaOp_InvalidSchemaOp } func (m *OperateSchemaRequest) GetTask() string { if m != nil { return m.Task } return "" } func (m *OperateSchemaRequest) GetSources() []string { if m != nil { return m.Sources } return nil } func (m *OperateSchemaRequest) GetDatabase() string { if m != nil { return m.Database } return "" } func (m *OperateSchemaRequest) GetTable() string { if m != nil { return m.Table } return "" } func (m *OperateSchemaRequest) 
GetSchema() string { if m != nil { return m.Schema } return "" } func (m *OperateSchemaRequest) GetFlush() bool { if m != nil { return m.Flush } return false } func (m *OperateSchemaRequest) GetSync() bool { if m != nil { return m.Sync } return false } func (m *OperateSchemaRequest) GetFromSource() bool { if m != nil { return m.FromSource } return false } func (m *OperateSchemaRequest) GetFromTarget() bool { if m != nil { return m.FromTarget } return false } type OperateSchemaResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *OperateSchemaResponse) Reset() { *m = OperateSchemaResponse{} } func (m *OperateSchemaResponse) String() string { return proto.CompactTextString(m) } func (*OperateSchemaResponse) ProtoMessage() {} func (*OperateSchemaResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{36} } func (m *OperateSchemaResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateSchemaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateSchemaResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateSchemaResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateSchemaResponse.Merge(m, src) } func (m *OperateSchemaResponse) XXX_Size() int { return m.Size() } func (m *OperateSchemaResponse) XXX_DiscardUnknown() { xxx_messageInfo_OperateSchemaResponse.DiscardUnknown(m) } var xxx_messageInfo_OperateSchemaResponse proto.InternalMessageInfo func (m *OperateSchemaResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OperateSchemaResponse) GetMsg() string { if m != nil { return 
m.Msg } return "" } func (m *OperateSchemaResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type GetSubTaskCfgRequest struct { // the task name Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (m *GetSubTaskCfgRequest) Reset() { *m = GetSubTaskCfgRequest{} } func (m *GetSubTaskCfgRequest) String() string { return proto.CompactTextString(m) } func (*GetSubTaskCfgRequest) ProtoMessage() {} func (*GetSubTaskCfgRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{37} } func (m *GetSubTaskCfgRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetSubTaskCfgRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetSubTaskCfgRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetSubTaskCfgRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSubTaskCfgRequest.Merge(m, src) } func (m *GetSubTaskCfgRequest) XXX_Size() int { return m.Size() } func (m *GetSubTaskCfgRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetSubTaskCfgRequest.DiscardUnknown(m) } var xxx_messageInfo_GetSubTaskCfgRequest proto.InternalMessageInfo func (m *GetSubTaskCfgRequest) GetName() string { if m != nil { return m.Name } return "" } type GetSubTaskCfgResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Cfgs []string `protobuf:"bytes,3,rep,name=cfgs,proto3" json:"cfgs,omitempty"` } func (m *GetSubTaskCfgResponse) Reset() { *m = GetSubTaskCfgResponse{} } func (m *GetSubTaskCfgResponse) String() string { return proto.CompactTextString(m) } func (*GetSubTaskCfgResponse) ProtoMessage() {} func (*GetSubTaskCfgResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, 
[]int{38} } func (m *GetSubTaskCfgResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetSubTaskCfgResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetSubTaskCfgResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetSubTaskCfgResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetSubTaskCfgResponse.Merge(m, src) } func (m *GetSubTaskCfgResponse) XXX_Size() int { return m.Size() } func (m *GetSubTaskCfgResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetSubTaskCfgResponse.DiscardUnknown(m) } var xxx_messageInfo_GetSubTaskCfgResponse proto.InternalMessageInfo func (m *GetSubTaskCfgResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *GetSubTaskCfgResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *GetSubTaskCfgResponse) GetCfgs() []string { if m != nil { return m.Cfgs } return nil } type GetCfgRequest struct { Type CfgType `protobuf:"varint,1,opt,name=type,proto3,enum=pb.CfgType" json:"type,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` } func (m *GetCfgRequest) Reset() { *m = GetCfgRequest{} } func (m *GetCfgRequest) String() string { return proto.CompactTextString(m) } func (*GetCfgRequest) ProtoMessage() {} func (*GetCfgRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{39} } func (m *GetCfgRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetCfgRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetCfgRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetCfgRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetCfgRequest.Merge(m, src) } 
func (m *GetCfgRequest) XXX_Size() int { return m.Size() } func (m *GetCfgRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetCfgRequest.DiscardUnknown(m) } var xxx_messageInfo_GetCfgRequest proto.InternalMessageInfo func (m *GetCfgRequest) GetType() CfgType { if m != nil { return m.Type } return CfgType_InvalidType } func (m *GetCfgRequest) GetName() string { if m != nil { return m.Name } return "" } type GetCfgResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Cfg string `protobuf:"bytes,3,opt,name=cfg,proto3" json:"cfg,omitempty"` } func (m *GetCfgResponse) Reset() { *m = GetCfgResponse{} } func (m *GetCfgResponse) String() string { return proto.CompactTextString(m) } func (*GetCfgResponse) ProtoMessage() {} func (*GetCfgResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{40} } func (m *GetCfgResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetCfgResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetCfgResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetCfgResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetCfgResponse.Merge(m, src) } func (m *GetCfgResponse) XXX_Size() int { return m.Size() } func (m *GetCfgResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetCfgResponse.DiscardUnknown(m) } var xxx_messageInfo_GetCfgResponse proto.InternalMessageInfo func (m *GetCfgResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *GetCfgResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *GetCfgResponse) GetCfg() string { if m != nil { return m.Cfg } return "" } type GetMasterCfgRequest struct { } func (m *GetMasterCfgRequest) Reset() { *m = GetMasterCfgRequest{} 
} func (m *GetMasterCfgRequest) String() string { return proto.CompactTextString(m) } func (*GetMasterCfgRequest) ProtoMessage() {} func (*GetMasterCfgRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{41} } func (m *GetMasterCfgRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetMasterCfgRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetMasterCfgRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetMasterCfgRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetMasterCfgRequest.Merge(m, src) } func (m *GetMasterCfgRequest) XXX_Size() int { return m.Size() } func (m *GetMasterCfgRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetMasterCfgRequest.DiscardUnknown(m) } var xxx_messageInfo_GetMasterCfgRequest proto.InternalMessageInfo type GetMasterCfgResponse struct { Cfg string `protobuf:"bytes,1,opt,name=cfg,proto3" json:"cfg,omitempty"` } func (m *GetMasterCfgResponse) Reset() { *m = GetMasterCfgResponse{} } func (m *GetMasterCfgResponse) String() string { return proto.CompactTextString(m) } func (*GetMasterCfgResponse) ProtoMessage() {} func (*GetMasterCfgResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{42} } func (m *GetMasterCfgResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetMasterCfgResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetMasterCfgResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetMasterCfgResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetMasterCfgResponse.Merge(m, src) } func (m *GetMasterCfgResponse) XXX_Size() int { return m.Size() } func (m 
*GetMasterCfgResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetMasterCfgResponse.DiscardUnknown(m) } var xxx_messageInfo_GetMasterCfgResponse proto.InternalMessageInfo func (m *GetMasterCfgResponse) GetCfg() string { if m != nil { return m.Cfg } return "" } type HandleErrorRequest struct { Op ErrorOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.ErrorOp" json:"op,omitempty"` Task string `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"` Sources []string `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` BinlogPos string `protobuf:"bytes,4,opt,name=binlogPos,proto3" json:"binlogPos,omitempty"` Sqls []string `protobuf:"bytes,5,rep,name=sqls,proto3" json:"sqls,omitempty"` } func (m *HandleErrorRequest) Reset() { *m = HandleErrorRequest{} } func (m *HandleErrorRequest) String() string { return proto.CompactTextString(m) } func (*HandleErrorRequest) ProtoMessage() {} func (*HandleErrorRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{43} } func (m *HandleErrorRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *HandleErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_HandleErrorRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *HandleErrorRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_HandleErrorRequest.Merge(m, src) } func (m *HandleErrorRequest) XXX_Size() int { return m.Size() } func (m *HandleErrorRequest) XXX_DiscardUnknown() { xxx_messageInfo_HandleErrorRequest.DiscardUnknown(m) } var xxx_messageInfo_HandleErrorRequest proto.InternalMessageInfo func (m *HandleErrorRequest) GetOp() ErrorOp { if m != nil { return m.Op } return ErrorOp_InvalidErrorOp } func (m *HandleErrorRequest) GetTask() string { if m != nil { return m.Task } return "" } func (m *HandleErrorRequest) GetSources() []string { if 
m != nil { return m.Sources } return nil } func (m *HandleErrorRequest) GetBinlogPos() string { if m != nil { return m.BinlogPos } return "" } func (m *HandleErrorRequest) GetSqls() []string { if m != nil { return m.Sqls } return nil } type HandleErrorResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *HandleErrorResponse) Reset() { *m = HandleErrorResponse{} } func (m *HandleErrorResponse) String() string { return proto.CompactTextString(m) } func (*HandleErrorResponse) ProtoMessage() {} func (*HandleErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{44} } func (m *HandleErrorResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *HandleErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_HandleErrorResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *HandleErrorResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_HandleErrorResponse.Merge(m, src) } func (m *HandleErrorResponse) XXX_Size() int { return m.Size() } func (m *HandleErrorResponse) XXX_DiscardUnknown() { xxx_messageInfo_HandleErrorResponse.DiscardUnknown(m) } var xxx_messageInfo_HandleErrorResponse proto.InternalMessageInfo func (m *HandleErrorResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *HandleErrorResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *HandleErrorResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type TransferSourceRequest struct { Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` Worker 
string `protobuf:"bytes,2,opt,name=worker,proto3" json:"worker,omitempty"` } func (m *TransferSourceRequest) Reset() { *m = TransferSourceRequest{} } func (m *TransferSourceRequest) String() string { return proto.CompactTextString(m) } func (*TransferSourceRequest) ProtoMessage() {} func (*TransferSourceRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{45} } func (m *TransferSourceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TransferSourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_TransferSourceRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *TransferSourceRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_TransferSourceRequest.Merge(m, src) } func (m *TransferSourceRequest) XXX_Size() int { return m.Size() } func (m *TransferSourceRequest) XXX_DiscardUnknown() { xxx_messageInfo_TransferSourceRequest.DiscardUnknown(m) } var xxx_messageInfo_TransferSourceRequest proto.InternalMessageInfo func (m *TransferSourceRequest) GetSource() string { if m != nil { return m.Source } return "" } func (m *TransferSourceRequest) GetWorker() string { if m != nil { return m.Worker } return "" } type TransferSourceResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *TransferSourceResponse) Reset() { *m = TransferSourceResponse{} } func (m *TransferSourceResponse) String() string { return proto.CompactTextString(m) } func (*TransferSourceResponse) ProtoMessage() {} func (*TransferSourceResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{46} } func (m *TransferSourceResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TransferSourceResponse) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_TransferSourceResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *TransferSourceResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_TransferSourceResponse.Merge(m, src) } func (m *TransferSourceResponse) XXX_Size() int { return m.Size() } func (m *TransferSourceResponse) XXX_DiscardUnknown() { xxx_messageInfo_TransferSourceResponse.DiscardUnknown(m) } var xxx_messageInfo_TransferSourceResponse proto.InternalMessageInfo func (m *TransferSourceResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *TransferSourceResponse) GetMsg() string { if m != nil { return m.Msg } return "" } type OperateRelayRequest struct { Op RelayOpV2 `protobuf:"varint,1,opt,name=op,proto3,enum=pb.RelayOpV2" json:"op,omitempty"` Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` Worker []string `protobuf:"bytes,3,rep,name=worker,proto3" json:"worker,omitempty"` } func (m *OperateRelayRequest) Reset() { *m = OperateRelayRequest{} } func (m *OperateRelayRequest) String() string { return proto.CompactTextString(m) } func (*OperateRelayRequest) ProtoMessage() {} func (*OperateRelayRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{47} } func (m *OperateRelayRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateRelayRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateRelayRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateRelayRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateRelayRequest.Merge(m, src) } func (m *OperateRelayRequest) XXX_Size() int { return m.Size() } func (m 
*OperateRelayRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateRelayRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateRelayRequest proto.InternalMessageInfo func (m *OperateRelayRequest) GetOp() RelayOpV2 { if m != nil { return m.Op } return RelayOpV2_InvalidRelayOpV2 } func (m *OperateRelayRequest) GetSource() string { if m != nil { return m.Source } return "" } func (m *OperateRelayRequest) GetWorker() []string { if m != nil { return m.Worker } return nil } type OperateRelayResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *OperateRelayResponse) Reset() { *m = OperateRelayResponse{} } func (m *OperateRelayResponse) String() string { return proto.CompactTextString(m) } func (*OperateRelayResponse) ProtoMessage() {} func (*OperateRelayResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{48} } func (m *OperateRelayResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateRelayResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateRelayResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateRelayResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateRelayResponse.Merge(m, src) } func (m *OperateRelayResponse) XXX_Size() int { return m.Size() } func (m *OperateRelayResponse) XXX_DiscardUnknown() { xxx_messageInfo_OperateRelayResponse.DiscardUnknown(m) } var xxx_messageInfo_OperateRelayResponse proto.InternalMessageInfo func (m *OperateRelayResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OperateRelayResponse) GetMsg() string { if m != nil { return m.Msg 
} return "" } func (m *OperateRelayResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type StartValidationRequest struct { // proto3 support optional, but need to upgrade protoc-gen-gogofaster, not do it in this pr // see https://github.com/protocolbuffers/protobuf/issues/1606 // // Types that are valid to be assigned to Mode: // // *StartValidationRequest_ModeValue Mode isStartValidationRequest_Mode `protobuf_oneof:"mode"` // Types that are valid to be assigned to StartTime: // // *StartValidationRequest_StartTimeValue StartTime isStartValidationRequest_StartTime `protobuf_oneof:"startTime"` Sources []string `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` TaskName string `protobuf:"bytes,4,opt,name=taskName,proto3" json:"taskName,omitempty"` } func (m *StartValidationRequest) Reset() { *m = StartValidationRequest{} } func (m *StartValidationRequest) String() string { return proto.CompactTextString(m) } func (*StartValidationRequest) ProtoMessage() {} func (*StartValidationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{49} } func (m *StartValidationRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *StartValidationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_StartValidationRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *StartValidationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StartValidationRequest.Merge(m, src) } func (m *StartValidationRequest) XXX_Size() int { return m.Size() } func (m *StartValidationRequest) XXX_DiscardUnknown() { xxx_messageInfo_StartValidationRequest.DiscardUnknown(m) } var xxx_messageInfo_StartValidationRequest proto.InternalMessageInfo type isStartValidationRequest_Mode interface { isStartValidationRequest_Mode() 
MarshalTo([]byte) (int, error) Size() int } type isStartValidationRequest_StartTime interface { isStartValidationRequest_StartTime() MarshalTo([]byte) (int, error) Size() int } type StartValidationRequest_ModeValue struct { ModeValue string `protobuf:"bytes,1,opt,name=mode_value,json=modeValue,proto3,oneof" json:"mode_value,omitempty"` } type StartValidationRequest_StartTimeValue struct { StartTimeValue string `protobuf:"bytes,2,opt,name=start_time_value,json=startTimeValue,proto3,oneof" json:"start_time_value,omitempty"` } func (*StartValidationRequest_ModeValue) isStartValidationRequest_Mode() {} func (*StartValidationRequest_StartTimeValue) isStartValidationRequest_StartTime() {} func (m *StartValidationRequest) GetMode() isStartValidationRequest_Mode { if m != nil { return m.Mode } return nil } func (m *StartValidationRequest) GetStartTime() isStartValidationRequest_StartTime { if m != nil { return m.StartTime } return nil } func (m *StartValidationRequest) GetModeValue() string { if x, ok := m.GetMode().(*StartValidationRequest_ModeValue); ok { return x.ModeValue } return "" } func (m *StartValidationRequest) GetStartTimeValue() string { if x, ok := m.GetStartTime().(*StartValidationRequest_StartTimeValue); ok { return x.StartTimeValue } return "" } func (m *StartValidationRequest) GetSources() []string { if m != nil { return m.Sources } return nil } func (m *StartValidationRequest) GetTaskName() string { if m != nil { return m.TaskName } return "" } // XXX_OneofWrappers is for the internal use of the proto package. 
func (*StartValidationRequest) XXX_OneofWrappers() []interface{} { return []interface{}{ (*StartValidationRequest_ModeValue)(nil), (*StartValidationRequest_StartTimeValue)(nil), } } type StartValidationResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *StartValidationResponse) Reset() { *m = StartValidationResponse{} } func (m *StartValidationResponse) String() string { return proto.CompactTextString(m) } func (*StartValidationResponse) ProtoMessage() {} func (*StartValidationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{50} } func (m *StartValidationResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *StartValidationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_StartValidationResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *StartValidationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StartValidationResponse.Merge(m, src) } func (m *StartValidationResponse) XXX_Size() int { return m.Size() } func (m *StartValidationResponse) XXX_DiscardUnknown() { xxx_messageInfo_StartValidationResponse.DiscardUnknown(m) } var xxx_messageInfo_StartValidationResponse proto.InternalMessageInfo func (m *StartValidationResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *StartValidationResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *StartValidationResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type StopValidationRequest struct { Sources []string `protobuf:"bytes,1,rep,name=sources,proto3" 
json:"sources,omitempty"` TaskName string `protobuf:"bytes,2,opt,name=taskName,proto3" json:"taskName,omitempty"` } func (m *StopValidationRequest) Reset() { *m = StopValidationRequest{} } func (m *StopValidationRequest) String() string { return proto.CompactTextString(m) } func (*StopValidationRequest) ProtoMessage() {} func (*StopValidationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{51} } func (m *StopValidationRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *StopValidationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_StopValidationRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *StopValidationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_StopValidationRequest.Merge(m, src) } func (m *StopValidationRequest) XXX_Size() int { return m.Size() } func (m *StopValidationRequest) XXX_DiscardUnknown() { xxx_messageInfo_StopValidationRequest.DiscardUnknown(m) } var xxx_messageInfo_StopValidationRequest proto.InternalMessageInfo func (m *StopValidationRequest) GetSources() []string { if m != nil { return m.Sources } return nil } func (m *StopValidationRequest) GetTaskName() string { if m != nil { return m.TaskName } return "" } type StopValidationResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *StopValidationResponse) Reset() { *m = StopValidationResponse{} } func (m *StopValidationResponse) String() string { return proto.CompactTextString(m) } func (*StopValidationResponse) ProtoMessage() {} func (*StopValidationResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor_f9bef11f2a341f03, []int{52} } func (m *StopValidationResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *StopValidationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_StopValidationResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *StopValidationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_StopValidationResponse.Merge(m, src) } func (m *StopValidationResponse) XXX_Size() int { return m.Size() } func (m *StopValidationResponse) XXX_DiscardUnknown() { xxx_messageInfo_StopValidationResponse.DiscardUnknown(m) } var xxx_messageInfo_StopValidationResponse proto.InternalMessageInfo func (m *StopValidationResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *StopValidationResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *StopValidationResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type UpdateValidationRequest struct { TaskName string `protobuf:"bytes,1,opt,name=taskName,proto3" json:"taskName,omitempty"` Sources []string `protobuf:"bytes,2,rep,name=sources,proto3" json:"sources,omitempty"` BinlogPos string `protobuf:"bytes,3,opt,name=binlogPos,proto3" json:"binlogPos,omitempty"` BinlogGTID string `protobuf:"bytes,4,opt,name=binlogGTID,proto3" json:"binlogGTID,omitempty"` } func (m *UpdateValidationRequest) Reset() { *m = UpdateValidationRequest{} } func (m *UpdateValidationRequest) String() string { return proto.CompactTextString(m) } func (*UpdateValidationRequest) ProtoMessage() {} func (*UpdateValidationRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{53} } func (m *UpdateValidationRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UpdateValidationRequest) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UpdateValidationRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *UpdateValidationRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateValidationRequest.Merge(m, src) } func (m *UpdateValidationRequest) XXX_Size() int { return m.Size() } func (m *UpdateValidationRequest) XXX_DiscardUnknown() { xxx_messageInfo_UpdateValidationRequest.DiscardUnknown(m) } var xxx_messageInfo_UpdateValidationRequest proto.InternalMessageInfo func (m *UpdateValidationRequest) GetTaskName() string { if m != nil { return m.TaskName } return "" } func (m *UpdateValidationRequest) GetSources() []string { if m != nil { return m.Sources } return nil } func (m *UpdateValidationRequest) GetBinlogPos() string { if m != nil { return m.BinlogPos } return "" } func (m *UpdateValidationRequest) GetBinlogGTID() string { if m != nil { return m.BinlogGTID } return "" } type UpdateValidationResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Sources []*CommonWorkerResponse `protobuf:"bytes,3,rep,name=sources,proto3" json:"sources,omitempty"` } func (m *UpdateValidationResponse) Reset() { *m = UpdateValidationResponse{} } func (m *UpdateValidationResponse) String() string { return proto.CompactTextString(m) } func (*UpdateValidationResponse) ProtoMessage() {} func (*UpdateValidationResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{54} } func (m *UpdateValidationResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UpdateValidationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UpdateValidationResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := 
m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *UpdateValidationResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateValidationResponse.Merge(m, src) } func (m *UpdateValidationResponse) XXX_Size() int { return m.Size() } func (m *UpdateValidationResponse) XXX_DiscardUnknown() { xxx_messageInfo_UpdateValidationResponse.DiscardUnknown(m) } var xxx_messageInfo_UpdateValidationResponse proto.InternalMessageInfo func (m *UpdateValidationResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *UpdateValidationResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *UpdateValidationResponse) GetSources() []*CommonWorkerResponse { if m != nil { return m.Sources } return nil } type EncryptRequest struct { Plaintext string `protobuf:"bytes,1,opt,name=plaintext,proto3" json:"plaintext,omitempty"` } func (m *EncryptRequest) Reset() { *m = EncryptRequest{} } func (m *EncryptRequest) String() string { return proto.CompactTextString(m) } func (*EncryptRequest) ProtoMessage() {} func (*EncryptRequest) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{55} } func (m *EncryptRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EncryptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_EncryptRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *EncryptRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_EncryptRequest.Merge(m, src) } func (m *EncryptRequest) XXX_Size() int { return m.Size() } func (m *EncryptRequest) XXX_DiscardUnknown() { xxx_messageInfo_EncryptRequest.DiscardUnknown(m) } var xxx_messageInfo_EncryptRequest proto.InternalMessageInfo func (m *EncryptRequest) GetPlaintext() string { if m != nil { return m.Plaintext } return "" } type EncryptResponse 
struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Ciphertext string `protobuf:"bytes,3,opt,name=ciphertext,proto3" json:"ciphertext,omitempty"` } func (m *EncryptResponse) Reset() { *m = EncryptResponse{} } func (m *EncryptResponse) String() string { return proto.CompactTextString(m) } func (*EncryptResponse) ProtoMessage() {} func (*EncryptResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{56} } func (m *EncryptResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *EncryptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_EncryptResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *EncryptResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_EncryptResponse.Merge(m, src) } func (m *EncryptResponse) XXX_Size() int { return m.Size() } func (m *EncryptResponse) XXX_DiscardUnknown() { xxx_messageInfo_EncryptResponse.DiscardUnknown(m) } var xxx_messageInfo_EncryptResponse proto.InternalMessageInfo func (m *EncryptResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *EncryptResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *EncryptResponse) GetCiphertext() string { if m != nil { return m.Ciphertext } return "" } type ListTaskConfigsResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` TaskConfigs map[string]string `protobuf:"bytes,3,rep,name=taskConfigs,proto3" json:"taskConfigs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *ListTaskConfigsResponse) Reset() { *m = ListTaskConfigsResponse{} } func (m 
*ListTaskConfigsResponse) String() string { return proto.CompactTextString(m) } func (*ListTaskConfigsResponse) ProtoMessage() {} func (*ListTaskConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{57} } func (m *ListTaskConfigsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ListTaskConfigsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ListTaskConfigsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ListTaskConfigsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ListTaskConfigsResponse.Merge(m, src) } func (m *ListTaskConfigsResponse) XXX_Size() int { return m.Size() } func (m *ListTaskConfigsResponse) XXX_DiscardUnknown() { xxx_messageInfo_ListTaskConfigsResponse.DiscardUnknown(m) } var xxx_messageInfo_ListTaskConfigsResponse proto.InternalMessageInfo func (m *ListTaskConfigsResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *ListTaskConfigsResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *ListTaskConfigsResponse) GetTaskConfigs() map[string]string { if m != nil { return m.TaskConfigs } return nil } type ListSourceConfigsResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` SourceConfigs map[string]string `protobuf:"bytes,3,rep,name=sourceConfigs,proto3" json:"sourceConfigs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *ListSourceConfigsResponse) Reset() { *m = ListSourceConfigsResponse{} } func (m *ListSourceConfigsResponse) String() string { return proto.CompactTextString(m) } func (*ListSourceConfigsResponse) ProtoMessage() {} func (*ListSourceConfigsResponse) Descriptor() 
([]byte, []int) { return fileDescriptor_f9bef11f2a341f03, []int{58} } func (m *ListSourceConfigsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ListSourceConfigsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ListSourceConfigsResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ListSourceConfigsResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_ListSourceConfigsResponse.Merge(m, src) } func (m *ListSourceConfigsResponse) XXX_Size() int { return m.Size() } func (m *ListSourceConfigsResponse) XXX_DiscardUnknown() { xxx_messageInfo_ListSourceConfigsResponse.DiscardUnknown(m) } var xxx_messageInfo_ListSourceConfigsResponse proto.InternalMessageInfo func (m *ListSourceConfigsResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *ListSourceConfigsResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *ListSourceConfigsResponse) GetSourceConfigs() map[string]string { if m != nil { return m.SourceConfigs } return nil } func init() { proto.RegisterEnum("pb.UnlockDDLLockOp", UnlockDDLLockOp_name, UnlockDDLLockOp_value) proto.RegisterEnum("pb.SourceOp", SourceOp_name, SourceOp_value) proto.RegisterEnum("pb.LeaderOp", LeaderOp_name, LeaderOp_value) proto.RegisterEnum("pb.CfgType", CfgType_name, CfgType_value) proto.RegisterEnum("pb.RelayOpV2", RelayOpV2_name, RelayOpV2_value) proto.RegisterType((*StartTaskRequest)(nil), "pb.StartTaskRequest") proto.RegisterType((*StartTaskResponse)(nil), "pb.StartTaskResponse") proto.RegisterType((*OperateTaskRequest)(nil), "pb.OperateTaskRequest") proto.RegisterType((*OperateTaskResponse)(nil), "pb.OperateTaskResponse") proto.RegisterType((*UpdateTaskRequest)(nil), "pb.UpdateTaskRequest") proto.RegisterType((*UpdateTaskResponse)(nil), "pb.UpdateTaskResponse") 
proto.RegisterType((*QueryStatusListRequest)(nil), "pb.QueryStatusListRequest") proto.RegisterType((*QueryStatusListResponse)(nil), "pb.QueryStatusListResponse") proto.RegisterType((*ShowDDLLocksRequest)(nil), "pb.ShowDDLLocksRequest") proto.RegisterType((*DDLLock)(nil), "pb.DDLLock") proto.RegisterType((*ShowDDLLocksResponse)(nil), "pb.ShowDDLLocksResponse") proto.RegisterType((*UnlockDDLLockRequest)(nil), "pb.UnlockDDLLockRequest") proto.RegisterType((*UnlockDDLLockResponse)(nil), "pb.UnlockDDLLockResponse") proto.RegisterType((*OperateWorkerRelayRequest)(nil), "pb.OperateWorkerRelayRequest") proto.RegisterType((*OperateWorkerRelayResponse)(nil), "pb.OperateWorkerRelayResponse") proto.RegisterType((*PurgeWorkerRelayRequest)(nil), "pb.PurgeWorkerRelayRequest") proto.RegisterType((*PurgeWorkerRelayResponse)(nil), "pb.PurgeWorkerRelayResponse") proto.RegisterType((*CheckTaskRequest)(nil), "pb.CheckTaskRequest") proto.RegisterType((*CheckTaskResponse)(nil), "pb.CheckTaskResponse") proto.RegisterType((*OperateSourceRequest)(nil), "pb.OperateSourceRequest") proto.RegisterType((*OperateSourceResponse)(nil), "pb.OperateSourceResponse") proto.RegisterType((*RegisterWorkerRequest)(nil), "pb.RegisterWorkerRequest") proto.RegisterType((*RegisterWorkerResponse)(nil), "pb.RegisterWorkerResponse") proto.RegisterType((*OfflineMemberRequest)(nil), "pb.OfflineMemberRequest") proto.RegisterType((*OfflineMemberResponse)(nil), "pb.OfflineMemberResponse") proto.RegisterType((*OperateLeaderRequest)(nil), "pb.OperateLeaderRequest") proto.RegisterType((*OperateLeaderResponse)(nil), "pb.OperateLeaderResponse") proto.RegisterType((*MasterInfo)(nil), "pb.MasterInfo") proto.RegisterType((*WorkerInfo)(nil), "pb.WorkerInfo") proto.RegisterType((*ListLeaderMember)(nil), "pb.ListLeaderMember") proto.RegisterType((*ListMasterMember)(nil), "pb.ListMasterMember") proto.RegisterType((*ListWorkerMember)(nil), "pb.ListWorkerMember") proto.RegisterType((*Members)(nil), "pb.Members") 
proto.RegisterType((*ListMemberRequest)(nil), "pb.ListMemberRequest") proto.RegisterType((*ListMemberResponse)(nil), "pb.ListMemberResponse") proto.RegisterType((*OperateSchemaRequest)(nil), "pb.OperateSchemaRequest") proto.RegisterType((*OperateSchemaResponse)(nil), "pb.OperateSchemaResponse") proto.RegisterType((*GetSubTaskCfgRequest)(nil), "pb.GetSubTaskCfgRequest") proto.RegisterType((*GetSubTaskCfgResponse)(nil), "pb.GetSubTaskCfgResponse") proto.RegisterType((*GetCfgRequest)(nil), "pb.GetCfgRequest") proto.RegisterType((*GetCfgResponse)(nil), "pb.GetCfgResponse") proto.RegisterType((*GetMasterCfgRequest)(nil), "pb.GetMasterCfgRequest") proto.RegisterType((*GetMasterCfgResponse)(nil), "pb.GetMasterCfgResponse") proto.RegisterType((*HandleErrorRequest)(nil), "pb.HandleErrorRequest") proto.RegisterType((*HandleErrorResponse)(nil), "pb.HandleErrorResponse") proto.RegisterType((*TransferSourceRequest)(nil), "pb.TransferSourceRequest") proto.RegisterType((*TransferSourceResponse)(nil), "pb.TransferSourceResponse") proto.RegisterType((*OperateRelayRequest)(nil), "pb.OperateRelayRequest") proto.RegisterType((*OperateRelayResponse)(nil), "pb.OperateRelayResponse") proto.RegisterType((*StartValidationRequest)(nil), "pb.StartValidationRequest") proto.RegisterType((*StartValidationResponse)(nil), "pb.StartValidationResponse") proto.RegisterType((*StopValidationRequest)(nil), "pb.StopValidationRequest") proto.RegisterType((*StopValidationResponse)(nil), "pb.StopValidationResponse") proto.RegisterType((*UpdateValidationRequest)(nil), "pb.UpdateValidationRequest") proto.RegisterType((*UpdateValidationResponse)(nil), "pb.UpdateValidationResponse") proto.RegisterType((*EncryptRequest)(nil), "pb.EncryptRequest") proto.RegisterType((*EncryptResponse)(nil), "pb.EncryptResponse") proto.RegisterType((*ListTaskConfigsResponse)(nil), "pb.ListTaskConfigsResponse") proto.RegisterMapType((map[string]string)(nil), "pb.ListTaskConfigsResponse.TaskConfigsEntry") 
proto.RegisterType((*ListSourceConfigsResponse)(nil), "pb.ListSourceConfigsResponse") proto.RegisterMapType((map[string]string)(nil), "pb.ListSourceConfigsResponse.SourceConfigsEntry") } func init() { proto.RegisterFile("dmmaster.proto", fileDescriptor_f9bef11f2a341f03) } var fileDescriptor_f9bef11f2a341f03 = []byte{ // 2658 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0xcd, 0x6f, 0x1b, 0xc7, 0xf5, 0x5c, 0x52, 0x1f, 0xd4, 0x93, 0x44, 0x51, 0x23, 0x89, 0xa2, 0xd7, 0x32, 0x2d, 0x4f, 0x3e, 0x20, 0x08, 0x81, 0xf4, 0xb3, 0x7e, 0x39, 0x14, 0x06, 0x12, 0x24, 0x16, 0x15, 0x5b, 0x88, 0x6c, 0xa7, 0x2b, 0xd9, 0x6d, 0x10, 0xa0, 0xc9, 0x92, 0x1c, 0x52, 0x84, 0x96, 0xbb, 0xeb, 0xdd, 0xa5, 0x64, 0xc2, 0x48, 0x0f, 0x3d, 0xf5, 0xd2, 0xb4, 0x45, 0x8a, 0xe6, 0xd8, 0x43, 0xff, 0x81, 0xfe, 0x19, 0x3d, 0x06, 0xc8, 0xa5, 0x97, 0xa2, 0x85, 0xdd, 0x3f, 0xa4, 0x98, 0x37, 0xb3, 0xbb, 0xb3, 0x1f, 0x64, 0x4a, 0x03, 0x15, 0x7a, 0x9b, 0xf7, 0xde, 0xf0, 0x7d, 0xcd, 0x9b, 0x79, 0x1f, 0x4b, 0xa8, 0x74, 0x06, 0x03, 0xd3, 0x0f, 0x98, 0xb7, 0xe7, 0x7a, 0x4e, 0xe0, 0x90, 0xa2, 0xdb, 0xd2, 0x2b, 0x9d, 0xc1, 0x95, 0xe3, 0x5d, 0x84, 0x38, 0x7d, 0xab, 0xe7, 0x38, 0x3d, 0x8b, 0xed, 0x9b, 0x6e, 0x7f, 0xdf, 0xb4, 0x6d, 0x27, 0x30, 0x83, 0xbe, 0x63, 0xfb, 0x92, 0x7a, 0x53, 0x52, 0x11, 0x6a, 0x0d, 0xbb, 0xfb, 0x6c, 0xe0, 0x06, 0x23, 0x41, 0xa4, 0xbf, 0x84, 0xea, 0x69, 0x60, 0x7a, 0xc1, 0x99, 0xe9, 0x5f, 0x18, 0xec, 0xf9, 0x90, 0xf9, 0x01, 0x21, 0x30, 0x13, 0x98, 0xfe, 0x45, 0x5d, 0xdb, 0xd6, 0x76, 0x16, 0x0c, 0x5c, 0x93, 0x3a, 0xcc, 0xfb, 0xce, 0xd0, 0x6b, 0x33, 0xbf, 0x5e, 0xdc, 0x2e, 0xed, 0x2c, 0x18, 0x21, 0x48, 0x1a, 0x00, 0x1e, 0x1b, 0x38, 0x97, 0xec, 0x11, 0x0b, 0xcc, 0x7a, 0x69, 0x5b, 0xdb, 0x29, 0x1b, 0x0a, 0x86, 0x6c, 0xc1, 0x82, 0x8f, 0x12, 0xfa, 0x03, 0x56, 0x9f, 0x41, 0x96, 0x31, 0x82, 0x7e, 0xab, 0xc1, 0xaa, 0xa2, 0x80, 0xef, 0x3a, 0xb6, 0xcf, 0x48, 0x0d, 0xe6, 0x3c, 0xe6, 0x0f, 0xad, 0x00, 0x75, 0x28, 0x1b, 0x12, 0x22, 0x55, 0x28, 
0x0d, 0xfc, 0x5e, 0xbd, 0x88, 0x5c, 0xf8, 0x92, 0x1c, 0xc4, 0x7a, 0x95, 0xb6, 0x4b, 0x3b, 0x8b, 0x07, 0xf5, 0x3d, 0xb7, 0xb5, 0x77, 0xe8, 0x0c, 0x06, 0x8e, 0xfd, 0x33, 0xf4, 0x51, 0xc8, 0x34, 0xd6, 0x78, 0x1b, 0x16, 0xdb, 0xe7, 0xac, 0xcd, 0xc5, 0x71, 0x11, 0x42, 0x27, 0x15, 0x45, 0x7f, 0x01, 0xe4, 0x89, 0xcb, 0x3c, 0x33, 0x60, 0xaa, 0x5f, 0x74, 0x28, 0x3a, 0x2e, 0x6a, 0x54, 0x39, 0x00, 0x2e, 0x86, 0x13, 0x9f, 0xb8, 0x46, 0xd1, 0x71, 0xb9, 0xcf, 0x6c, 0x73, 0xc0, 0xa4, 0x6a, 0xb8, 0x56, 0x7d, 0x56, 0x4a, 0xf8, 0x8c, 0xfe, 0x56, 0x83, 0xb5, 0x84, 0x00, 0x69, 0xf7, 0x24, 0x09, 0xb1, 0x4f, 0x8a, 0x79, 0x3e, 0x29, 0xe5, 0xfa, 0x64, 0xe6, 0x3f, 0xf4, 0x09, 0xfd, 0x18, 0x56, 0x9f, 0xba, 0x9d, 0x94, 0xc1, 0x53, 0x05, 0x02, 0xfd, 0x83, 0x06, 0x44, 0xe5, 0xf1, 0x3f, 0x72, 0x96, 0x9f, 0x40, 0xed, 0xa7, 0x43, 0xe6, 0x8d, 0x4e, 0x03, 0x33, 0x18, 0xfa, 0x27, 0x7d, 0x3f, 0x50, 0xcc, 0xc3, 0x33, 0xd3, 0xf2, 0xcf, 0x2c, 0x65, 0xde, 0x25, 0x6c, 0x66, 0xf8, 0x4c, 0x6d, 0xe2, 0xdd, 0xb4, 0x89, 0x9b, 0xdc, 0x44, 0x85, 0x6f, 0xf6, 0x64, 0x0e, 0x61, 0xed, 0xf4, 0xdc, 0xb9, 0x6a, 0x36, 0x4f, 0x4e, 0x9c, 0xf6, 0x85, 0xff, 0x66, 0x67, 0xf3, 0x27, 0x0d, 0xe6, 0x25, 0x07, 0x52, 0x81, 0xe2, 0x71, 0x53, 0xfe, 0xae, 0x78, 0xdc, 0x8c, 0x38, 0x15, 0x15, 0x4e, 0x04, 0x66, 0x06, 0x4e, 0x87, 0xc9, 0xa8, 0xc2, 0x35, 0x59, 0x87, 0x59, 0xe7, 0xca, 0x66, 0x9e, 0x74, 0xb2, 0x00, 0xf8, 0xce, 0x66, 0xf3, 0xc4, 0xaf, 0xcf, 0xa2, 0x40, 0x5c, 0x73, 0x7f, 0xf8, 0x23, 0xbb, 0xcd, 0x3a, 0xf5, 0x39, 0xc4, 0x4a, 0x88, 0xe8, 0x50, 0x1e, 0xda, 0x92, 0x32, 0x8f, 0x94, 0x08, 0xa6, 0x6d, 0x58, 0x4f, 0x9a, 0x39, 0xb5, 0x6f, 0xef, 0xc0, 0xac, 0xc5, 0x7f, 0x2a, 0x3d, 0xbb, 0xc8, 0x3d, 0x2b, 0xd9, 0x19, 0x82, 0x42, 0xff, 0xae, 0xc1, 0xfa, 0x53, 0x9b, 0xaf, 0x43, 0x82, 0xf4, 0x66, 0xda, 0x27, 0x14, 0x96, 0x3c, 0xe6, 0x5a, 0x66, 0x9b, 0x3d, 0x41, 0x93, 0x85, 0x98, 0x04, 0x8e, 0x87, 0x5e, 0xd7, 0xf1, 0xda, 0xcc, 0xc0, 0xb7, 0x4e, 0xbe, 0x7c, 0x2a, 0x8a, 0xbc, 0x85, 0xd7, 0x79, 0x06, 0xaf, 0xf3, 0x1a, 0x57, 0x27, 0x21, 0x5b, 
0xde, 0x6b, 0xe5, 0xd0, 0x66, 0x93, 0x2f, 0xab, 0x0e, 0xe5, 0x8e, 0x19, 0x98, 0x2d, 0xd3, 0x67, 0xf5, 0x39, 0x54, 0x20, 0x82, 0xf9, 0x61, 0x04, 0x66, 0xcb, 0x62, 0xf5, 0x79, 0x71, 0x18, 0x08, 0xd0, 0x8f, 0x61, 0x23, 0x65, 0xde, 0xb4, 0x5e, 0xa4, 0x06, 0xdc, 0x90, 0x2f, 0x53, 0x78, 0xe5, 0x2c, 0x73, 0x14, 0xba, 0xe9, 0xa6, 0xf2, 0x3e, 0xa1, 0x7f, 0x91, 0x9a, 0x35, 0x24, 0x15, 0x7d, 0xdf, 0x69, 0xa0, 0xe7, 0x31, 0x95, 0xca, 0x4d, 0xe4, 0xfa, 0xdf, 0x7d, 0xf6, 0xbe, 0xd3, 0x60, 0xf3, 0xb3, 0xa1, 0xd7, 0xcb, 0x33, 0x56, 0xb1, 0x47, 0xcb, 0x1c, 0x4c, 0xdf, 0x36, 0xdb, 0x41, 0xff, 0x92, 0x49, 0xad, 0x22, 0x18, 0x6f, 0x13, 0xcf, 0x74, 0x5c, 0xb1, 0x92, 0x81, 0x6b, 0xbe, 0xbf, 0xdb, 0xb7, 0x18, 0x3e, 0x36, 0xe2, 0xf2, 0x44, 0x30, 0xde, 0x95, 0x61, 0xab, 0xd9, 0xf7, 0xea, 0xb3, 0x48, 0x91, 0x10, 0x7d, 0x01, 0xf5, 0xac, 0x62, 0xd7, 0xf1, 0xa4, 0xd2, 0x4b, 0xa8, 0x1e, 0xf2, 0xf7, 0xf3, 0xc7, 0x32, 0x41, 0x0d, 0xe6, 0x98, 0xe7, 0x1d, 0xda, 0xe2, 0x64, 0x4a, 0x86, 0x84, 0xb8, 0xdf, 0xae, 0x4c, 0xcf, 0xe6, 0x04, 0xe1, 0x84, 0x10, 0xfc, 0x91, 0x52, 0xe0, 0x03, 0x58, 0x55, 0xe4, 0x4e, 0x1d, 0xb8, 0xbf, 0xd6, 0x60, 0x5d, 0x06, 0xd9, 0x29, 0x5a, 0x12, 0xea, 0xbe, 0xa5, 0x84, 0xd7, 0x12, 0x37, 0x5f, 0x90, 0xe3, 0xf8, 0x6a, 0x3b, 0x76, 0xb7, 0xdf, 0x93, 0x41, 0x2b, 0x21, 0x7e, 0x66, 0xc2, 0x21, 0xc7, 0x4d, 0x99, 0xbd, 0x23, 0x98, 0x97, 0x3c, 0xa2, 0xfe, 0x7a, 0x1c, 0x9f, 0xa8, 0x82, 0xa1, 0x43, 0xd8, 0x48, 0x69, 0x72, 0x2d, 0x07, 0x77, 0x04, 0x1b, 0x06, 0xeb, 0xf5, 0x79, 0xb1, 0x18, 0x6e, 0x99, 0x98, 0xe8, 0xcc, 0x4e, 0xc7, 0x63, 0xbe, 0x2f, 0xc5, 0x86, 0x20, 0xfd, 0x0a, 0x6a, 0x69, 0x36, 0x53, 0xab, 0xcf, 0x4f, 0x9a, 0xb5, 0x3d, 0x16, 0x7c, 0xca, 0x46, 0x18, 0x05, 0x4b, 0x46, 0x8c, 0xa0, 0x1f, 0xc2, 0xfa, 0x93, 0x6e, 0xd7, 0xea, 0xdb, 0xec, 0x11, 0x1b, 0xb4, 0x12, 0x7a, 0x06, 0x23, 0x37, 0xd2, 0x93, 0xaf, 0xf3, 0x0a, 0x2b, 0xfe, 0xcc, 0xa5, 0x7e, 0x3f, 0x75, 0xb4, 0xbc, 0x1f, 0x05, 0xcb, 0x09, 0x33, 0x3b, 0xb1, 0x0a, 0x99, 0x60, 0x11, 0x64, 0x11, 0x2c, 0x28, 0x38, 0xf9, 0xab, 0xa9, 
0x05, 0x7f, 0xa3, 0x01, 0x3c, 0xc2, 0x82, 0xfe, 0xd8, 0xee, 0x3a, 0xb9, 0x47, 0xa3, 0x43, 0x79, 0x80, 0x76, 0x1d, 0x37, 0xf1, 0x97, 0x33, 0x46, 0x04, 0xf3, 0x77, 0xdf, 0xb4, 0xfa, 0x51, 0xba, 0x11, 0x00, 0xff, 0x85, 0xcb, 0x98, 0xf7, 0xd4, 0x38, 0x11, 0x6f, 0xdf, 0x82, 0x11, 0xc1, 0x3c, 0x58, 0xdb, 0x56, 0x9f, 0xd9, 0x01, 0x52, 0x45, 0x8a, 0x51, 0x30, 0xb4, 0x05, 0x20, 0x8e, 0x79, 0xac, 0x3e, 0x04, 0x66, 0x78, 0x6c, 0x84, 0x47, 0xc0, 0xd7, 0x5c, 0x0f, 0x3f, 0x30, 0x7b, 0x61, 0x85, 0x20, 0x00, 0x7c, 0xcc, 0x30, 0x18, 0xe5, 0xa5, 0x90, 0x10, 0x3d, 0x81, 0x2a, 0x2f, 0x98, 0x84, 0xd3, 0xc4, 0x99, 0x85, 0xae, 0xd1, 0xe2, 0xa0, 0xc9, 0xab, 0xa1, 0x43, 0xd9, 0xa5, 0x58, 0x36, 0x7d, 0x2c, 0xb8, 0x09, 0x2f, 0x8e, 0xe5, 0xb6, 0x03, 0xf3, 0xa2, 0x71, 0x12, 0xe9, 0x68, 0xf1, 0xa0, 0xc2, 0x8f, 0x33, 0x76, 0xbd, 0x11, 0x92, 0x43, 0x7e, 0xc2, 0x0b, 0x93, 0xf8, 0x89, 0x2b, 0x9e, 0xe0, 0x17, 0xbb, 0xce, 0x08, 0xc9, 0xf4, 0xcf, 0x1a, 0xcc, 0x0b, 0x36, 0x3e, 0xd9, 0x83, 0x39, 0x0b, 0xad, 0x46, 0x56, 0x8b, 0x07, 0xeb, 0x18, 0x53, 0x29, 0x5f, 0x3c, 0x2c, 0x18, 0x72, 0x17, 0xdf, 0x2f, 0xd4, 0x42, 0x2f, 0x28, 0xfb, 0x55, 0x6b, 0xf9, 0x7e, 0xb1, 0x8b, 0xef, 0x17, 0x62, 0xd1, 0x43, 0xca, 0x7e, 0xd5, 0x1a, 0xbe, 0x5f, 0xec, 0xba, 0x5f, 0x86, 0x39, 0x11, 0x4b, 0xf4, 0x39, 0xac, 0x22, 0xdf, 0xc4, 0x0d, 0xac, 0x25, 0xd4, 0x2d, 0x47, 0x6a, 0xd5, 0x12, 0x6a, 0x95, 0x23, 0xf1, 0xb5, 0x84, 0xf8, 0x72, 0x28, 0x86, 0x87, 0x07, 0x3f, 0xbe, 0x30, 0x1a, 0x05, 0x40, 0x19, 0x10, 0x55, 0xe4, 0xd4, 0xaf, 0xca, 0x3b, 0x30, 0x2f, 0x94, 0x4f, 0xd4, 0x78, 0xd2, 0xd5, 0x46, 0x48, 0xa3, 0x7f, 0x2c, 0xc6, 0x99, 0xa0, 0x7d, 0xce, 0x06, 0xe6, 0xf8, 0x4c, 0x80, 0xe4, 0xb8, 0x85, 0xcb, 0xd4, 0xc1, 0x63, 0x5b, 0xb8, 0x44, 0x71, 0x36, 0x33, 0xae, 0x38, 0x9b, 0x55, 0x8a, 0x33, 0xbc, 0x1c, 0x28, 0x4f, 0x16, 0x73, 0x12, 0xe2, 0xbb, 0xbb, 0xd6, 0xd0, 0x3f, 0xc7, 0x52, 0xae, 0x6c, 0x08, 0x80, 0x6b, 0xc3, 0x2b, 0xe3, 0x7a, 0x19, 0x91, 0xb8, 0xe6, 0x57, 0xb9, 0xeb, 0x39, 0x03, 0x91, 0x54, 0xea, 0x0b, 0xa2, 0xd5, 0x8e, 0x31, 
0x21, 0xfd, 0xcc, 0xf4, 0x7a, 0x2c, 0xa8, 0x43, 0x4c, 0x17, 0x18, 0x35, 0x2f, 0x49, 0xbf, 0x5c, 0x4b, 0x5e, 0xda, 0x85, 0xf5, 0x07, 0x2c, 0x38, 0x1d, 0xb6, 0x78, 0x66, 0x3f, 0xec, 0xf6, 0x26, 0xa4, 0x25, 0xfa, 0x14, 0x36, 0x52, 0x7b, 0xa7, 0x56, 0x91, 0xc0, 0x4c, 0xbb, 0xdb, 0x0b, 0x0f, 0x0c, 0xd7, 0xb4, 0x09, 0xcb, 0x0f, 0x58, 0xa0, 0xc8, 0xbe, 0xad, 0xa4, 0x1a, 0x59, 0x75, 0x1e, 0x76, 0x7b, 0x67, 0x23, 0x97, 0x4d, 0xc8, 0x3b, 0x27, 0x50, 0x09, 0xb9, 0x4c, 0xad, 0x55, 0x15, 0x4a, 0xed, 0x6e, 0x54, 0xaf, 0xb6, 0xbb, 0x3d, 0xba, 0x01, 0x6b, 0x0f, 0x98, 0xbc, 0xd7, 0xb1, 0x66, 0x74, 0x07, 0xbd, 0xa5, 0xa0, 0xa5, 0x28, 0xc9, 0x40, 0x8b, 0x19, 0xfc, 0x5e, 0x03, 0xf2, 0xd0, 0xb4, 0x3b, 0x16, 0x3b, 0xf2, 0x3c, 0xc7, 0x1b, 0x5b, 0xa4, 0x23, 0xf5, 0x8d, 0x82, 0x7c, 0x0b, 0x16, 0x5a, 0x7d, 0xdb, 0x72, 0x7a, 0x9f, 0x39, 0x7e, 0x58, 0xb0, 0x45, 0x08, 0x0c, 0xd1, 0xe7, 0x56, 0xd4, 0xfa, 0xf1, 0x35, 0xf5, 0x61, 0x2d, 0xa1, 0xd2, 0xb5, 0x04, 0xd8, 0x03, 0xd8, 0x38, 0xf3, 0x4c, 0xdb, 0xef, 0x32, 0x2f, 0x59, 0xfa, 0xc5, 0xf9, 0x48, 0x53, 0xf3, 0x91, 0xf2, 0x6c, 0x09, 0xc9, 0x12, 0xa2, 0xf7, 0xa1, 0x96, 0x66, 0x34, 0x75, 0x82, 0xef, 0x44, 0xa3, 0x9d, 0x44, 0x37, 0x71, 0x4b, 0x39, 0x95, 0x65, 0xa5, 0xc9, 0x79, 0x76, 0x10, 0x96, 0xa1, 0x52, 0xd3, 0xe2, 0x18, 0x4d, 0xc5, 0xd1, 0x84, 0x9a, 0x06, 0xd1, 0x13, 0x77, 0x9d, 0xad, 0xc1, 0x5f, 0x34, 0xa8, 0xe1, 0xb4, 0xee, 0x99, 0x69, 0xf5, 0x3b, 0x38, 0x65, 0x8c, 0x2f, 0x14, 0x0c, 0x9c, 0x0e, 0xfb, 0xf2, 0xd2, 0xb4, 0x86, 0xd2, 0xdd, 0x0f, 0x0b, 0xc6, 0x02, 0xc7, 0x3d, 0xe3, 0x28, 0xb2, 0x0b, 0x55, 0xac, 0xf5, 0xbf, 0xe4, 0x2d, 0x91, 0xdc, 0x86, 0xea, 0x3c, 0xd4, 0x8c, 0x4a, 0xd4, 0x05, 0x88, 0xbd, 0x13, 0x9f, 0x5d, 0x1e, 0xb3, 0x4a, 0xe1, 0x1d, 0xc1, 0xf7, 0xe7, 0xc4, 0xd0, 0xe2, 0xfe, 0xa2, 0xd2, 0x66, 0xd0, 0x2b, 0xd8, 0xcc, 0x68, 0x7c, 0x2d, 0xbe, 0x7a, 0x04, 0x1b, 0xa7, 0x81, 0xe3, 0x66, 0x3d, 0x35, 0xb1, 0xaf, 0x8c, 0x8c, 0x2b, 0x26, 0x8d, 0xa3, 0x97, 0xdc, 0xf3, 0x49, 0x76, 0xd7, 0x62, 0xc6, 0x6f, 0x34, 0xd8, 0x14, 0x53, 0xbd, 0xac, 
0x25, 0xaa, 0xbe, 0x5a, 0x52, 0xdf, 0x09, 0x03, 0xe3, 0xc4, 0xa3, 0x52, 0x4a, 0x3f, 0x2a, 0x0d, 0x00, 0x01, 0x3c, 0x38, 0x3b, 0x6e, 0x86, 0xbd, 0x55, 0x8c, 0xe1, 0x7d, 0x71, 0x56, 0x9d, 0x6b, 0xf1, 0xc4, 0x1e, 0x54, 0x8e, 0xec, 0xb6, 0x37, 0x72, 0x83, 0xb8, 0x9e, 0x58, 0x70, 0x2d, 0xb3, 0x6f, 0x07, 0xec, 0x45, 0x20, 0x1d, 0x10, 0x23, 0xe8, 0x17, 0xb0, 0x12, 0xed, 0x9f, 0x5a, 0x41, 0x5e, 0xb5, 0xf7, 0xdd, 0x73, 0xe6, 0x21, 0x6f, 0xe1, 0x25, 0x05, 0x43, 0x7f, 0xd0, 0x60, 0x93, 0xd7, 0x52, 0x98, 0x26, 0xb1, 0x63, 0x7d, 0x93, 0x91, 0xd9, 0x63, 0x58, 0x0c, 0x62, 0x06, 0xd2, 0x15, 0xef, 0x85, 0x25, 0x64, 0x0e, 0xef, 0x3d, 0x05, 0x77, 0x64, 0x07, 0xde, 0xc8, 0x50, 0x19, 0xe8, 0x1f, 0x42, 0x35, 0xbd, 0x81, 0x4b, 0xbd, 0x60, 0xa3, 0x30, 0x6f, 0x5d, 0xb0, 0x11, 0x2f, 0x78, 0x94, 0xeb, 0x6f, 0x08, 0xe0, 0x5e, 0xf1, 0x27, 0x1a, 0xfd, 0x87, 0x06, 0x37, 0xb8, 0x64, 0xf1, 0xf8, 0xbe, 0xb9, 0x5d, 0xcf, 0x60, 0xd9, 0x57, 0x59, 0x48, 0xcb, 0xfe, 0x2f, 0xb4, 0x2c, 0x97, 0xff, 0x5e, 0x02, 0x2b, 0xac, 0x4b, 0xb2, 0xd1, 0x3f, 0x02, 0x92, 0xdd, 0x34, 0x8d, 0x85, 0xbb, 0x1f, 0xc1, 0x4a, 0x6a, 0x08, 0x48, 0x56, 0x61, 0xf9, 0xd8, 0xbe, 0xe4, 0xd1, 0x2c, 0x10, 0xd5, 0x02, 0x59, 0x82, 0xf2, 0xe9, 0x45, 0xdf, 0xe5, 0x70, 0x55, 0xe3, 0xd0, 0xd1, 0x0b, 0xd6, 0x46, 0xa8, 0xb8, 0xdb, 0x82, 0x72, 0x38, 0xc0, 0x20, 0x6b, 0xb0, 0x22, 0x7f, 0x1a, 0xa2, 0xaa, 0x05, 0xb2, 0x02, 0x8b, 0xf8, 0xe2, 0x09, 0x54, 0x55, 0x23, 0x55, 0x58, 0x12, 0x57, 0x46, 0x62, 0x8a, 0xa4, 0x02, 0xc0, 0x1f, 0x13, 0x09, 0x97, 0x10, 0x3e, 0x77, 0xae, 0x24, 0x3c, 0xb3, 0xfb, 0x29, 0x94, 0xc3, 0xbe, 0x57, 0x91, 0x11, 0xa2, 0xaa, 0x05, 0xae, 0xf3, 0xd1, 0x65, 0xbf, 0x1d, 0x44, 0x28, 0x8d, 0x6c, 0xc2, 0xda, 0xa1, 0x69, 0xb7, 0x99, 0x95, 0x24, 0x14, 0x77, 0x6d, 0x98, 0x97, 0xa5, 0x15, 0x57, 0x4d, 0xf2, 0xe2, 0xa0, 0x30, 0x94, 0x07, 0x0c, 0x42, 0x1a, 0x57, 0x43, 0xd4, 0x3d, 0x08, 0xa3, 0x9a, 0xe2, 0x32, 0x22, 0x2c, 0xd4, 0x44, 0x15, 0x11, 0x9e, 0x21, 0xeb, 0x22, 0xdc, 0xce, 0xd8, 0xc0, 0xb5, 0xcc, 0x40, 0x60, 0x67, 0x77, 0x9b, 0xb0, 
0x10, 0xe5, 0x56, 0xbe, 0x45, 0x4a, 0x8c, 0x70, 0xd5, 0x02, 0xf7, 0x08, 0xba, 0x08, 0x71, 0xcf, 0x0e, 0xaa, 0x9a, 0x70, 0x9a, 0xe3, 0x86, 0x88, 0xe2, 0xc1, 0x37, 0xeb, 0x30, 0x27, 0x94, 0x21, 0x9f, 0xc3, 0x42, 0xf4, 0x89, 0x8a, 0x60, 0x83, 0x95, 0xfe, 0x64, 0xa6, 0x6f, 0xa4, 0xb0, 0x22, 0xa2, 0xe8, 0xed, 0x5f, 0xfd, 0xf0, 0xaf, 0x6f, 0x8b, 0x37, 0xe8, 0xfa, 0xbe, 0xe9, 0xf6, 0xfd, 0xfd, 0xcb, 0xbb, 0xa6, 0xe5, 0x9e, 0x9b, 0x77, 0xf7, 0xf9, 0x9d, 0xf1, 0xef, 0x69, 0xbb, 0xa4, 0x0b, 0x8b, 0xca, 0x77, 0x20, 0x52, 0xe3, 0x6c, 0xb2, 0x5f, 0x9e, 0xf4, 0xcd, 0x0c, 0x5e, 0x0a, 0x78, 0x17, 0x05, 0x6c, 0xeb, 0x37, 0xf3, 0x04, 0xec, 0xbf, 0xe4, 0x55, 0xeb, 0xd7, 0x5c, 0xce, 0x07, 0x00, 0xf1, 0xa7, 0x19, 0x82, 0xda, 0x66, 0x3e, 0xf7, 0xe8, 0xb5, 0x34, 0x5a, 0x0a, 0x29, 0x10, 0x0b, 0x16, 0x95, 0x6f, 0x14, 0x44, 0x4f, 0x7d, 0xb4, 0x50, 0x3e, 0xaa, 0xe8, 0x37, 0x73, 0x69, 0x92, 0xd3, 0xdb, 0xa8, 0x6e, 0x83, 0x6c, 0xa5, 0xd4, 0xf5, 0x71, 0xab, 0xd4, 0x97, 0x1c, 0xc2, 0x92, 0xfa, 0x29, 0x80, 0xa0, 0xf5, 0x39, 0xdf, 0x40, 0xf4, 0x7a, 0x96, 0x10, 0xa9, 0xfc, 0x09, 0x2c, 0x27, 0x2e, 0x1a, 0xa9, 0x67, 0x06, 0xf0, 0x21, 0x9b, 0x1b, 0x39, 0x94, 0x88, 0xcf, 0xe7, 0x50, 0xcb, 0x8e, 0xae, 0xd1, 0x8b, 0xb7, 0x94, 0x43, 0xc9, 0x8e, 0x8f, 0xf5, 0xc6, 0x38, 0x72, 0xc4, 0xfa, 0x09, 0x54, 0xd3, 0x23, 0x5e, 0x82, 0xee, 0x1b, 0x33, 0x91, 0xd6, 0xb7, 0xf2, 0x89, 0x11, 0xc3, 0x7b, 0xb0, 0x10, 0x4d, 0x50, 0x45, 0xa0, 0xa6, 0x07, 0xb9, 0x22, 0x50, 0x33, 0x63, 0x56, 0x5a, 0x20, 0x3d, 0x58, 0x4e, 0xcc, 0x2c, 0x85, 0xbf, 0xf2, 0x06, 0xaa, 0xc2, 0x5f, 0xb9, 0x03, 0x4e, 0x7a, 0x07, 0x0f, 0xf8, 0xa6, 0x5e, 0x4b, 0x1f, 0xb0, 0xc8, 0xa1, 0x3c, 0x14, 0x8f, 0xa1, 0x92, 0x1c, 0x2f, 0x92, 0x1b, 0xa2, 0x1c, 0xce, 0x99, 0x5c, 0xea, 0x7a, 0x1e, 0x29, 0xd2, 0xd9, 0x83, 0xe5, 0xc4, 0x1c, 0x50, 0xea, 0x9c, 0x33, 0x5a, 0x94, 0x3a, 0xe7, 0x0d, 0x0d, 0xe9, 0x7b, 0xa8, 0xf3, 0xbb, 0xbb, 0x6f, 0xa7, 0x74, 0x96, 0xe3, 0x84, 0xfd, 0x97, 0xbc, 0x1f, 0xfc, 0x3a, 0x0c, 0xce, 0x8b, 0xc8, 0x4f, 0xe2, 0x89, 0x4b, 0xf8, 0x29, 0x31, 
0x4b, 0x4c, 0xf8, 0x29, 0x39, 0x2f, 0xa4, 0xef, 0xa0, 0xcc, 0xdb, 0xba, 0x9e, 0x92, 0x29, 0xc6, 0x2d, 0xfb, 0x2f, 0x1d, 0x17, 0xaf, 0xed, 0x17, 0x00, 0xf1, 0xc0, 0x44, 0x5c, 0xdb, 0xcc, 0xcc, 0x46, 0x5c, 0xdb, 0xec, 0x5c, 0x85, 0x36, 0x50, 0x46, 0x9d, 0xd4, 0xf2, 0xed, 0x22, 0xdd, 0xf8, 0xc4, 0xc5, 0x20, 0x22, 0x71, 0xe2, 0xea, 0xe0, 0x24, 0x79, 0xe2, 0x89, 0xd1, 0x01, 0xdd, 0x46, 0x29, 0xba, 0xbe, 0x91, 0x3e, 0x71, 0xdc, 0xc6, 0x8d, 0xb0, 0xb0, 0xf7, 0x8e, 0x5b, 0x7a, 0x21, 0x27, 0x6f, 0x22, 0x20, 0xe4, 0xe4, 0xf6, 0xff, 0xe1, 0x4b, 0x47, 0x1a, 0x69, 0x39, 0xc3, 0x96, 0xfa, 0xd8, 0x91, 0x33, 0x98, 0x13, 0x3d, 0x3a, 0x59, 0x95, 0xcc, 0x14, 0xfe, 0x44, 0x45, 0x49, 0xc6, 0x6f, 0x21, 0xe3, 0x5b, 0x64, 0xd2, 0x13, 0x4a, 0xbe, 0x82, 0x45, 0xa5, 0xad, 0x15, 0xef, 0x74, 0xb6, 0xf5, 0x16, 0xef, 0x74, 0x4e, 0xff, 0x3b, 0xd6, 0x4b, 0x8c, 0xef, 0xc2, 0x6b, 0x71, 0x08, 0x4b, 0x6a, 0xdb, 0x2f, 0x1e, 0xbd, 0x9c, 0xf9, 0x80, 0x5e, 0xcf, 0x12, 0xa2, 0x0b, 0x71, 0x0c, 0x95, 0x64, 0xff, 0x2a, 0xee, 0x56, 0x6e, 0x73, 0x2c, 0xee, 0x56, 0x7e, 0xbb, 0x4b, 0x0b, 0x5c, 0x1f, 0xb5, 0xc1, 0x24, 0x6a, 0x0a, 0x4a, 0x3c, 0x4a, 0xf5, 0x2c, 0x21, 0x62, 0x72, 0x02, 0x2b, 0xa9, 0xe6, 0x4b, 0xe4, 0x8e, 0xfc, 0x1e, 0x52, 0xe4, 0x8e, 0x31, 0xdd, 0x9a, 0xb0, 0x2e, 0xd9, 0x02, 0x09, 0xeb, 0x72, 0xbb, 0x2c, 0x5d, 0xcf, 0x23, 0x45, 0xac, 0x7e, 0x8e, 0xb3, 0x97, 0x98, 0x24, 0x13, 0x5b, 0x43, 0xfa, 0x36, 0x4d, 0x08, 0x99, 0xde, 0x1e, 0x4b, 0x8f, 0x38, 0x3f, 0x05, 0x92, 0xd8, 0x20, 0x02, 0xe6, 0x56, 0xe6, 0x87, 0x89, 0xb8, 0x69, 0x8c, 0x23, 0x47, 0x6c, 0xcd, 0x28, 0x0d, 0xa5, 0x59, 0xdf, 0x51, 0xfc, 0x3f, 0x86, 0x3d, 0x9d, 0xb4, 0x45, 0x4d, 0x47, 0xe9, 0xce, 0x4a, 0xa4, 0xa3, 0x31, 0xed, 0x9f, 0x48, 0x47, 0xe3, 0x9a, 0x31, 0x5a, 0x20, 0xef, 0xc3, 0xbc, 0x6c, 0x80, 0x08, 0x5e, 0xbc, 0x64, 0xf7, 0xa4, 0xaf, 0x25, 0x70, 0xd1, 0xaf, 0x1e, 0xc2, 0x4a, 0xaa, 0xf9, 0x20, 0xb5, 0x3d, 0xf1, 0x17, 0xa6, 0xbd, 0xf0, 0x2f, 0x4c, 0x7b, 0x47, 0x03, 0x37, 0x18, 0x89, 0x78, 0x19, 0xd3, 0xa9, 0x60, 0xf4, 0xad, 0x66, 0x8a, 
0xfd, 0xb1, 0xbc, 0x6e, 0x4d, 0xec, 0x0d, 0x68, 0xe1, 0x7e, 0xfd, 0xaf, 0xaf, 0x1a, 0xda, 0xf7, 0xaf, 0x1a, 0xda, 0x3f, 0x5f, 0x35, 0xb4, 0xdf, 0xbd, 0x6e, 0x14, 0xbe, 0x7f, 0xdd, 0x28, 0xfc, 0xed, 0x75, 0xa3, 0xd0, 0x9a, 0x43, 0x56, 0xff, 0xff, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x32, 0x57, 0xbf, 0xa1, 0xab, 0x25, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // MasterClient is the client API for Master service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type MasterClient interface { StartTask(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*StartTaskResponse, error) OperateTask(ctx context.Context, in *OperateTaskRequest, opts ...grpc.CallOption) (*OperateTaskResponse, error) UpdateTask(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*UpdateTaskResponse, error) QueryStatus(ctx context.Context, in *QueryStatusListRequest, opts ...grpc.CallOption) (*QueryStatusListResponse, error) // show un-resolved DDL locks ShowDDLLocks(ctx context.Context, in *ShowDDLLocksRequest, opts ...grpc.CallOption) (*ShowDDLLocksResponse, error) // used by dmctl to manually unlock DDL lock UnlockDDLLock(ctx context.Context, in *UnlockDDLLockRequest, opts ...grpc.CallOption) (*UnlockDDLLockResponse, error) // OperateWorkerRelayTask requests some dm-workers to operate relay unit OperateWorkerRelayTask(ctx context.Context, in *OperateWorkerRelayRequest, opts ...grpc.CallOption) (*OperateWorkerRelayResponse, error) // PurgeWorkerRelay purges relay log files for some dm-workers PurgeWorkerRelay(ctx context.Context, in *PurgeWorkerRelayRequest, opts ...grpc.CallOption) 
(*PurgeWorkerRelayResponse, error) // CheckTask checks legality of task configuration CheckTask(ctx context.Context, in *CheckTaskRequest, opts ...grpc.CallOption) (*CheckTaskResponse, error) // Operate an upstream MySQL source. OperateSource(ctx context.Context, in *OperateSourceRequest, opts ...grpc.CallOption) (*OperateSourceResponse, error) // RegisterWorker register the dm-workers. RegisterWorker(ctx context.Context, in *RegisterWorkerRequest, opts ...grpc.CallOption) (*RegisterWorkerResponse, error) // OfflineMember offline the dm cluster's members (master/worker). OfflineMember(ctx context.Context, in *OfflineMemberRequest, opts ...grpc.CallOption) (*OfflineMemberResponse, error) // OperateLeader do some operate on master: // - evict leader: make the master resign if it is leader, and will not campaign the leader again // - cancel evict leader: the master can campaign leader again. OperateLeader(ctx context.Context, in *OperateLeaderRequest, opts ...grpc.CallOption) (*OperateLeaderResponse, error) // ListMember list member information ListMember(ctx context.Context, in *ListMemberRequest, opts ...grpc.CallOption) (*ListMemberResponse, error) OperateSchema(ctx context.Context, in *OperateSchemaRequest, opts ...grpc.CallOption) (*OperateSchemaResponse, error) GetSubTaskCfg(ctx context.Context, in *GetSubTaskCfgRequest, opts ...grpc.CallOption) (*GetSubTaskCfgResponse, error) // GetCfg get config GetCfg(ctx context.Context, in *GetCfgRequest, opts ...grpc.CallOption) (*GetCfgResponse, error) HandleError(ctx context.Context, in *HandleErrorRequest, opts ...grpc.CallOption) (*HandleErrorResponse, error) GetMasterCfg(ctx context.Context, in *GetMasterCfgRequest, opts ...grpc.CallOption) (*GetMasterCfgResponse, error) TransferSource(ctx context.Context, in *TransferSourceRequest, opts ...grpc.CallOption) (*TransferSourceResponse, error) OperateRelay(ctx context.Context, in *OperateRelayRequest, opts ...grpc.CallOption) (*OperateRelayResponse, error) 
StartValidation(ctx context.Context, in *StartValidationRequest, opts ...grpc.CallOption) (*StartValidationResponse, error) StopValidation(ctx context.Context, in *StopValidationRequest, opts ...grpc.CallOption) (*StopValidationResponse, error) GetValidationStatus(ctx context.Context, in *GetValidationStatusRequest, opts ...grpc.CallOption) (*GetValidationStatusResponse, error) GetValidationError(ctx context.Context, in *GetValidationErrorRequest, opts ...grpc.CallOption) (*GetValidationErrorResponse, error) OperateValidationError(ctx context.Context, in *OperateValidationErrorRequest, opts ...grpc.CallOption) (*OperateValidationErrorResponse, error) UpdateValidation(ctx context.Context, in *UpdateValidationRequest, opts ...grpc.CallOption) (*UpdateValidationResponse, error) // Encrypt encrypts the plaintext using the secret key of dm-master Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) ListTaskConfigs(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListTaskConfigsResponse, error) ListSourceConfigs(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListSourceConfigsResponse, error) } type masterClient struct { cc *grpc.ClientConn } func NewMasterClient(cc *grpc.ClientConn) MasterClient { return &masterClient{cc} } func (c *masterClient) StartTask(ctx context.Context, in *StartTaskRequest, opts ...grpc.CallOption) (*StartTaskResponse, error) { out := new(StartTaskResponse) err := c.cc.Invoke(ctx, "/pb.Master/StartTask", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) OperateTask(ctx context.Context, in *OperateTaskRequest, opts ...grpc.CallOption) (*OperateTaskResponse, error) { out := new(OperateTaskResponse) err := c.cc.Invoke(ctx, "/pb.Master/OperateTask", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *masterClient) UpdateTask(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*UpdateTaskResponse, error) { out := new(UpdateTaskResponse) err := c.cc.Invoke(ctx, "/pb.Master/UpdateTask", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) QueryStatus(ctx context.Context, in *QueryStatusListRequest, opts ...grpc.CallOption) (*QueryStatusListResponse, error) { out := new(QueryStatusListResponse) err := c.cc.Invoke(ctx, "/pb.Master/QueryStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) ShowDDLLocks(ctx context.Context, in *ShowDDLLocksRequest, opts ...grpc.CallOption) (*ShowDDLLocksResponse, error) { out := new(ShowDDLLocksResponse) err := c.cc.Invoke(ctx, "/pb.Master/ShowDDLLocks", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) UnlockDDLLock(ctx context.Context, in *UnlockDDLLockRequest, opts ...grpc.CallOption) (*UnlockDDLLockResponse, error) { out := new(UnlockDDLLockResponse) err := c.cc.Invoke(ctx, "/pb.Master/UnlockDDLLock", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) OperateWorkerRelayTask(ctx context.Context, in *OperateWorkerRelayRequest, opts ...grpc.CallOption) (*OperateWorkerRelayResponse, error) { out := new(OperateWorkerRelayResponse) err := c.cc.Invoke(ctx, "/pb.Master/OperateWorkerRelayTask", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) PurgeWorkerRelay(ctx context.Context, in *PurgeWorkerRelayRequest, opts ...grpc.CallOption) (*PurgeWorkerRelayResponse, error) { out := new(PurgeWorkerRelayResponse) err := c.cc.Invoke(ctx, "/pb.Master/PurgeWorkerRelay", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *masterClient) CheckTask(ctx context.Context, in *CheckTaskRequest, opts ...grpc.CallOption) (*CheckTaskResponse, error) { out := new(CheckTaskResponse) err := c.cc.Invoke(ctx, "/pb.Master/CheckTask", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) OperateSource(ctx context.Context, in *OperateSourceRequest, opts ...grpc.CallOption) (*OperateSourceResponse, error) { out := new(OperateSourceResponse) err := c.cc.Invoke(ctx, "/pb.Master/OperateSource", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) RegisterWorker(ctx context.Context, in *RegisterWorkerRequest, opts ...grpc.CallOption) (*RegisterWorkerResponse, error) { out := new(RegisterWorkerResponse) err := c.cc.Invoke(ctx, "/pb.Master/RegisterWorker", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) OfflineMember(ctx context.Context, in *OfflineMemberRequest, opts ...grpc.CallOption) (*OfflineMemberResponse, error) { out := new(OfflineMemberResponse) err := c.cc.Invoke(ctx, "/pb.Master/OfflineMember", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) OperateLeader(ctx context.Context, in *OperateLeaderRequest, opts ...grpc.CallOption) (*OperateLeaderResponse, error) { out := new(OperateLeaderResponse) err := c.cc.Invoke(ctx, "/pb.Master/OperateLeader", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) ListMember(ctx context.Context, in *ListMemberRequest, opts ...grpc.CallOption) (*ListMemberResponse, error) { out := new(ListMemberResponse) err := c.cc.Invoke(ctx, "/pb.Master/ListMember", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *masterClient) OperateSchema(ctx context.Context, in *OperateSchemaRequest, opts ...grpc.CallOption) (*OperateSchemaResponse, error) { out := new(OperateSchemaResponse) err := c.cc.Invoke(ctx, "/pb.Master/OperateSchema", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) GetSubTaskCfg(ctx context.Context, in *GetSubTaskCfgRequest, opts ...grpc.CallOption) (*GetSubTaskCfgResponse, error) { out := new(GetSubTaskCfgResponse) err := c.cc.Invoke(ctx, "/pb.Master/GetSubTaskCfg", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) GetCfg(ctx context.Context, in *GetCfgRequest, opts ...grpc.CallOption) (*GetCfgResponse, error) { out := new(GetCfgResponse) err := c.cc.Invoke(ctx, "/pb.Master/GetCfg", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) HandleError(ctx context.Context, in *HandleErrorRequest, opts ...grpc.CallOption) (*HandleErrorResponse, error) { out := new(HandleErrorResponse) err := c.cc.Invoke(ctx, "/pb.Master/HandleError", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) GetMasterCfg(ctx context.Context, in *GetMasterCfgRequest, opts ...grpc.CallOption) (*GetMasterCfgResponse, error) { out := new(GetMasterCfgResponse) err := c.cc.Invoke(ctx, "/pb.Master/GetMasterCfg", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) TransferSource(ctx context.Context, in *TransferSourceRequest, opts ...grpc.CallOption) (*TransferSourceResponse, error) { out := new(TransferSourceResponse) err := c.cc.Invoke(ctx, "/pb.Master/TransferSource", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *masterClient) OperateRelay(ctx context.Context, in *OperateRelayRequest, opts ...grpc.CallOption) (*OperateRelayResponse, error) { out := new(OperateRelayResponse) err := c.cc.Invoke(ctx, "/pb.Master/OperateRelay", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) StartValidation(ctx context.Context, in *StartValidationRequest, opts ...grpc.CallOption) (*StartValidationResponse, error) { out := new(StartValidationResponse) err := c.cc.Invoke(ctx, "/pb.Master/StartValidation", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) StopValidation(ctx context.Context, in *StopValidationRequest, opts ...grpc.CallOption) (*StopValidationResponse, error) { out := new(StopValidationResponse) err := c.cc.Invoke(ctx, "/pb.Master/StopValidation", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) GetValidationStatus(ctx context.Context, in *GetValidationStatusRequest, opts ...grpc.CallOption) (*GetValidationStatusResponse, error) { out := new(GetValidationStatusResponse) err := c.cc.Invoke(ctx, "/pb.Master/GetValidationStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) GetValidationError(ctx context.Context, in *GetValidationErrorRequest, opts ...grpc.CallOption) (*GetValidationErrorResponse, error) { out := new(GetValidationErrorResponse) err := c.cc.Invoke(ctx, "/pb.Master/GetValidationError", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) OperateValidationError(ctx context.Context, in *OperateValidationErrorRequest, opts ...grpc.CallOption) (*OperateValidationErrorResponse, error) { out := new(OperateValidationErrorResponse) err := c.cc.Invoke(ctx, "/pb.Master/OperateValidationError", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *masterClient) UpdateValidation(ctx context.Context, in *UpdateValidationRequest, opts ...grpc.CallOption) (*UpdateValidationResponse, error) { out := new(UpdateValidationResponse) err := c.cc.Invoke(ctx, "/pb.Master/UpdateValidation", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) Encrypt(ctx context.Context, in *EncryptRequest, opts ...grpc.CallOption) (*EncryptResponse, error) { out := new(EncryptResponse) err := c.cc.Invoke(ctx, "/pb.Master/Encrypt", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) ListTaskConfigs(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListTaskConfigsResponse, error) { out := new(ListTaskConfigsResponse) err := c.cc.Invoke(ctx, "/pb.Master/ListTaskConfigs", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *masterClient) ListSourceConfigs(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ListSourceConfigsResponse, error) { out := new(ListSourceConfigsResponse) err := c.cc.Invoke(ctx, "/pb.Master/ListSourceConfigs", in, out, opts...) if err != nil { return nil, err } return out, nil } // MasterServer is the server API for Master service. 
type MasterServer interface { StartTask(context.Context, *StartTaskRequest) (*StartTaskResponse, error) OperateTask(context.Context, *OperateTaskRequest) (*OperateTaskResponse, error) UpdateTask(context.Context, *UpdateTaskRequest) (*UpdateTaskResponse, error) QueryStatus(context.Context, *QueryStatusListRequest) (*QueryStatusListResponse, error) // show un-resolved DDL locks ShowDDLLocks(context.Context, *ShowDDLLocksRequest) (*ShowDDLLocksResponse, error) // used by dmctl to manually unlock DDL lock UnlockDDLLock(context.Context, *UnlockDDLLockRequest) (*UnlockDDLLockResponse, error) // OperateWorkerRelayTask requests some dm-workers to operate relay unit OperateWorkerRelayTask(context.Context, *OperateWorkerRelayRequest) (*OperateWorkerRelayResponse, error) // PurgeWorkerRelay purges relay log files for some dm-workers PurgeWorkerRelay(context.Context, *PurgeWorkerRelayRequest) (*PurgeWorkerRelayResponse, error) // CheckTask checks legality of task configuration CheckTask(context.Context, *CheckTaskRequest) (*CheckTaskResponse, error) // Operate an upstream MySQL source. OperateSource(context.Context, *OperateSourceRequest) (*OperateSourceResponse, error) // RegisterWorker register the dm-workers. RegisterWorker(context.Context, *RegisterWorkerRequest) (*RegisterWorkerResponse, error) // OfflineMember offline the dm cluster's members (master/worker). OfflineMember(context.Context, *OfflineMemberRequest) (*OfflineMemberResponse, error) // OperateLeader do some operate on master: // - evict leader: make the master resign if it is leader, and will not campaign the leader again // - cancel evict leader: the master can campaign leader again. 
OperateLeader(context.Context, *OperateLeaderRequest) (*OperateLeaderResponse, error) // ListMember list member information ListMember(context.Context, *ListMemberRequest) (*ListMemberResponse, error) OperateSchema(context.Context, *OperateSchemaRequest) (*OperateSchemaResponse, error) GetSubTaskCfg(context.Context, *GetSubTaskCfgRequest) (*GetSubTaskCfgResponse, error) // GetCfg get config GetCfg(context.Context, *GetCfgRequest) (*GetCfgResponse, error) HandleError(context.Context, *HandleErrorRequest) (*HandleErrorResponse, error) GetMasterCfg(context.Context, *GetMasterCfgRequest) (*GetMasterCfgResponse, error) TransferSource(context.Context, *TransferSourceRequest) (*TransferSourceResponse, error) OperateRelay(context.Context, *OperateRelayRequest) (*OperateRelayResponse, error) StartValidation(context.Context, *StartValidationRequest) (*StartValidationResponse, error) StopValidation(context.Context, *StopValidationRequest) (*StopValidationResponse, error) GetValidationStatus(context.Context, *GetValidationStatusRequest) (*GetValidationStatusResponse, error) GetValidationError(context.Context, *GetValidationErrorRequest) (*GetValidationErrorResponse, error) OperateValidationError(context.Context, *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) UpdateValidation(context.Context, *UpdateValidationRequest) (*UpdateValidationResponse, error) // Encrypt encrypts the plaintext using the secret key of dm-master Encrypt(context.Context, *EncryptRequest) (*EncryptResponse, error) ListTaskConfigs(context.Context, *emptypb.Empty) (*ListTaskConfigsResponse, error) ListSourceConfigs(context.Context, *emptypb.Empty) (*ListSourceConfigsResponse, error) } // UnimplementedMasterServer can be embedded to have forward compatible implementations. 
type UnimplementedMasterServer struct { } func (*UnimplementedMasterServer) StartTask(ctx context.Context, req *StartTaskRequest) (*StartTaskResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartTask not implemented") } func (*UnimplementedMasterServer) OperateTask(ctx context.Context, req *OperateTaskRequest) (*OperateTaskResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateTask not implemented") } func (*UnimplementedMasterServer) UpdateTask(ctx context.Context, req *UpdateTaskRequest) (*UpdateTaskResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateTask not implemented") } func (*UnimplementedMasterServer) QueryStatus(ctx context.Context, req *QueryStatusListRequest) (*QueryStatusListResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method QueryStatus not implemented") } func (*UnimplementedMasterServer) ShowDDLLocks(ctx context.Context, req *ShowDDLLocksRequest) (*ShowDDLLocksResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ShowDDLLocks not implemented") } func (*UnimplementedMasterServer) UnlockDDLLock(ctx context.Context, req *UnlockDDLLockRequest) (*UnlockDDLLockResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UnlockDDLLock not implemented") } func (*UnimplementedMasterServer) OperateWorkerRelayTask(ctx context.Context, req *OperateWorkerRelayRequest) (*OperateWorkerRelayResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateWorkerRelayTask not implemented") } func (*UnimplementedMasterServer) PurgeWorkerRelay(ctx context.Context, req *PurgeWorkerRelayRequest) (*PurgeWorkerRelayResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method PurgeWorkerRelay not implemented") } func (*UnimplementedMasterServer) CheckTask(ctx context.Context, req *CheckTaskRequest) (*CheckTaskResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CheckTask not implemented") } 
func (*UnimplementedMasterServer) OperateSource(ctx context.Context, req *OperateSourceRequest) (*OperateSourceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateSource not implemented") } func (*UnimplementedMasterServer) RegisterWorker(ctx context.Context, req *RegisterWorkerRequest) (*RegisterWorkerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RegisterWorker not implemented") } func (*UnimplementedMasterServer) OfflineMember(ctx context.Context, req *OfflineMemberRequest) (*OfflineMemberResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OfflineMember not implemented") } func (*UnimplementedMasterServer) OperateLeader(ctx context.Context, req *OperateLeaderRequest) (*OperateLeaderResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateLeader not implemented") } func (*UnimplementedMasterServer) ListMember(ctx context.Context, req *ListMemberRequest) (*ListMemberResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListMember not implemented") } func (*UnimplementedMasterServer) OperateSchema(ctx context.Context, req *OperateSchemaRequest) (*OperateSchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateSchema not implemented") } func (*UnimplementedMasterServer) GetSubTaskCfg(ctx context.Context, req *GetSubTaskCfgRequest) (*GetSubTaskCfgResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSubTaskCfg not implemented") } func (*UnimplementedMasterServer) GetCfg(ctx context.Context, req *GetCfgRequest) (*GetCfgResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetCfg not implemented") } func (*UnimplementedMasterServer) HandleError(ctx context.Context, req *HandleErrorRequest) (*HandleErrorResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method HandleError not implemented") } func (*UnimplementedMasterServer) GetMasterCfg(ctx context.Context, req 
*GetMasterCfgRequest) (*GetMasterCfgResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetMasterCfg not implemented") } func (*UnimplementedMasterServer) TransferSource(ctx context.Context, req *TransferSourceRequest) (*TransferSourceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TransferSource not implemented") } func (*UnimplementedMasterServer) OperateRelay(ctx context.Context, req *OperateRelayRequest) (*OperateRelayResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateRelay not implemented") } func (*UnimplementedMasterServer) StartValidation(ctx context.Context, req *StartValidationRequest) (*StartValidationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StartValidation not implemented") } func (*UnimplementedMasterServer) StopValidation(ctx context.Context, req *StopValidationRequest) (*StopValidationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StopValidation not implemented") } func (*UnimplementedMasterServer) GetValidationStatus(ctx context.Context, req *GetValidationStatusRequest) (*GetValidationStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetValidationStatus not implemented") } func (*UnimplementedMasterServer) GetValidationError(ctx context.Context, req *GetValidationErrorRequest) (*GetValidationErrorResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetValidationError not implemented") } func (*UnimplementedMasterServer) OperateValidationError(ctx context.Context, req *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateValidationError not implemented") } func (*UnimplementedMasterServer) UpdateValidation(ctx context.Context, req *UpdateValidationRequest) (*UpdateValidationResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateValidation not implemented") } 
func (*UnimplementedMasterServer) Encrypt(ctx context.Context, req *EncryptRequest) (*EncryptResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Encrypt not implemented") } func (*UnimplementedMasterServer) ListTaskConfigs(ctx context.Context, req *emptypb.Empty) (*ListTaskConfigsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListTaskConfigs not implemented") } func (*UnimplementedMasterServer) ListSourceConfigs(ctx context.Context, req *emptypb.Empty) (*ListSourceConfigsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListSourceConfigs not implemented") } func RegisterMasterServer(s *grpc.Server, srv MasterServer) { s.RegisterService(&_Master_serviceDesc, srv) } func _Master_StartTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartTaskRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).StartTask(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/StartTask", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).StartTask(ctx, req.(*StartTaskRequest)) } return interceptor(ctx, in, info, handler) } func _Master_OperateTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateTaskRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OperateTask(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OperateTask", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OperateTask(ctx, req.(*OperateTaskRequest)) } return interceptor(ctx, in, info, handler) } func _Master_UpdateTask_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateTaskRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).UpdateTask(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/UpdateTask", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).UpdateTask(ctx, req.(*UpdateTaskRequest)) } return interceptor(ctx, in, info, handler) } func _Master_QueryStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryStatusListRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).QueryStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/QueryStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).QueryStatus(ctx, req.(*QueryStatusListRequest)) } return interceptor(ctx, in, info, handler) } func _Master_ShowDDLLocks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ShowDDLLocksRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).ShowDDLLocks(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/ShowDDLLocks", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).ShowDDLLocks(ctx, req.(*ShowDDLLocksRequest)) } return interceptor(ctx, in, info, handler) } func _Master_UnlockDDLLock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UnlockDDLLockRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return 
srv.(MasterServer).UnlockDDLLock(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/UnlockDDLLock", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).UnlockDDLLock(ctx, req.(*UnlockDDLLockRequest)) } return interceptor(ctx, in, info, handler) } func _Master_OperateWorkerRelayTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateWorkerRelayRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OperateWorkerRelayTask(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OperateWorkerRelayTask", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OperateWorkerRelayTask(ctx, req.(*OperateWorkerRelayRequest)) } return interceptor(ctx, in, info, handler) } func _Master_PurgeWorkerRelay_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PurgeWorkerRelayRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).PurgeWorkerRelay(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/PurgeWorkerRelay", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).PurgeWorkerRelay(ctx, req.(*PurgeWorkerRelayRequest)) } return interceptor(ctx, in, info, handler) } func _Master_CheckTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CheckTaskRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).CheckTask(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/CheckTask", } 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).CheckTask(ctx, req.(*CheckTaskRequest)) } return interceptor(ctx, in, info, handler) } func _Master_OperateSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateSourceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OperateSource(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OperateSource", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OperateSource(ctx, req.(*OperateSourceRequest)) } return interceptor(ctx, in, info, handler) } func _Master_RegisterWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RegisterWorkerRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).RegisterWorker(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/RegisterWorker", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).RegisterWorker(ctx, req.(*RegisterWorkerRequest)) } return interceptor(ctx, in, info, handler) } func _Master_OfflineMember_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OfflineMemberRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OfflineMember(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OfflineMember", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OfflineMember(ctx, req.(*OfflineMemberRequest)) } return interceptor(ctx, in, 
info, handler) } func _Master_OperateLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateLeaderRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OperateLeader(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OperateLeader", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OperateLeader(ctx, req.(*OperateLeaderRequest)) } return interceptor(ctx, in, info, handler) } func _Master_ListMember_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListMemberRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).ListMember(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/ListMember", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).ListMember(ctx, req.(*ListMemberRequest)) } return interceptor(ctx, in, info, handler) } func _Master_OperateSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateSchemaRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OperateSchema(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OperateSchema", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OperateSchema(ctx, req.(*OperateSchemaRequest)) } return interceptor(ctx, in, info, handler) } func _Master_GetSubTaskCfg_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := 
new(GetSubTaskCfgRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).GetSubTaskCfg(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/GetSubTaskCfg", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).GetSubTaskCfg(ctx, req.(*GetSubTaskCfgRequest)) } return interceptor(ctx, in, info, handler) } func _Master_GetCfg_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetCfgRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).GetCfg(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/GetCfg", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).GetCfg(ctx, req.(*GetCfgRequest)) } return interceptor(ctx, in, info, handler) } func _Master_HandleError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HandleErrorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).HandleError(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/HandleError", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).HandleError(ctx, req.(*HandleErrorRequest)) } return interceptor(ctx, in, info, handler) } func _Master_GetMasterCfg_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetMasterCfgRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).GetMasterCfg(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/GetMasterCfg", } 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).GetMasterCfg(ctx, req.(*GetMasterCfgRequest)) } return interceptor(ctx, in, info, handler) } func _Master_TransferSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TransferSourceRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).TransferSource(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/TransferSource", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).TransferSource(ctx, req.(*TransferSourceRequest)) } return interceptor(ctx, in, info, handler) } func _Master_OperateRelay_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateRelayRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OperateRelay(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OperateRelay", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OperateRelay(ctx, req.(*OperateRelayRequest)) } return interceptor(ctx, in, info, handler) } func _Master_StartValidation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StartValidationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).StartValidation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/StartValidation", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).StartValidation(ctx, req.(*StartValidationRequest)) } return 
interceptor(ctx, in, info, handler) } func _Master_StopValidation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StopValidationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).StopValidation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/StopValidation", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).StopValidation(ctx, req.(*StopValidationRequest)) } return interceptor(ctx, in, info, handler) } func _Master_GetValidationStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetValidationStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).GetValidationStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/GetValidationStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).GetValidationStatus(ctx, req.(*GetValidationStatusRequest)) } return interceptor(ctx, in, info, handler) } func _Master_GetValidationError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetValidationErrorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).GetValidationError(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/GetValidationError", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).GetValidationError(ctx, req.(*GetValidationErrorRequest)) } return interceptor(ctx, in, info, handler) } func _Master_OperateValidationError_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateValidationErrorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).OperateValidationError(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/OperateValidationError", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).OperateValidationError(ctx, req.(*OperateValidationErrorRequest)) } return interceptor(ctx, in, info, handler) } func _Master_UpdateValidation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateValidationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).UpdateValidation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/UpdateValidation", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).UpdateValidation(ctx, req.(*UpdateValidationRequest)) } return interceptor(ctx, in, info, handler) } func _Master_Encrypt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(EncryptRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).Encrypt(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/Encrypt", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).Encrypt(ctx, req.(*EncryptRequest)) } return interceptor(ctx, in, info, handler) } func _Master_ListTaskConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(emptypb.Empty) if err := 
dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).ListTaskConfigs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/ListTaskConfigs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).ListTaskConfigs(ctx, req.(*emptypb.Empty)) } return interceptor(ctx, in, info, handler) } func _Master_ListSourceConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(emptypb.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(MasterServer).ListSourceConfigs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Master/ListSourceConfigs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MasterServer).ListSourceConfigs(ctx, req.(*emptypb.Empty)) } return interceptor(ctx, in, info, handler) } var _Master_serviceDesc = grpc.ServiceDesc{ ServiceName: "pb.Master", HandlerType: (*MasterServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "StartTask", Handler: _Master_StartTask_Handler, }, { MethodName: "OperateTask", Handler: _Master_OperateTask_Handler, }, { MethodName: "UpdateTask", Handler: _Master_UpdateTask_Handler, }, { MethodName: "QueryStatus", Handler: _Master_QueryStatus_Handler, }, { MethodName: "ShowDDLLocks", Handler: _Master_ShowDDLLocks_Handler, }, { MethodName: "UnlockDDLLock", Handler: _Master_UnlockDDLLock_Handler, }, { MethodName: "OperateWorkerRelayTask", Handler: _Master_OperateWorkerRelayTask_Handler, }, { MethodName: "PurgeWorkerRelay", Handler: _Master_PurgeWorkerRelay_Handler, }, { MethodName: "CheckTask", Handler: _Master_CheckTask_Handler, }, { MethodName: "OperateSource", Handler: _Master_OperateSource_Handler, }, { MethodName: "RegisterWorker", Handler: _Master_RegisterWorker_Handler, }, { MethodName: "OfflineMember", Handler: 
_Master_OfflineMember_Handler, }, { MethodName: "OperateLeader", Handler: _Master_OperateLeader_Handler, }, { MethodName: "ListMember", Handler: _Master_ListMember_Handler, }, { MethodName: "OperateSchema", Handler: _Master_OperateSchema_Handler, }, { MethodName: "GetSubTaskCfg", Handler: _Master_GetSubTaskCfg_Handler, }, { MethodName: "GetCfg", Handler: _Master_GetCfg_Handler, }, { MethodName: "HandleError", Handler: _Master_HandleError_Handler, }, { MethodName: "GetMasterCfg", Handler: _Master_GetMasterCfg_Handler, }, { MethodName: "TransferSource", Handler: _Master_TransferSource_Handler, }, { MethodName: "OperateRelay", Handler: _Master_OperateRelay_Handler, }, { MethodName: "StartValidation", Handler: _Master_StartValidation_Handler, }, { MethodName: "StopValidation", Handler: _Master_StopValidation_Handler, }, { MethodName: "GetValidationStatus", Handler: _Master_GetValidationStatus_Handler, }, { MethodName: "GetValidationError", Handler: _Master_GetValidationError_Handler, }, { MethodName: "OperateValidationError", Handler: _Master_OperateValidationError_Handler, }, { MethodName: "UpdateValidation", Handler: _Master_UpdateValidation_Handler, }, { MethodName: "Encrypt", Handler: _Master_Encrypt_Handler, }, { MethodName: "ListTaskConfigs", Handler: _Master_ListTaskConfigs_Handler, }, { MethodName: "ListSourceConfigs", Handler: _Master_ListSourceConfigs_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "dmmaster.proto", } func (m *StartTaskRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *StartTaskRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StartTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.StartTime) > 0 { i -= len(m.StartTime) copy(dAtA[i:], m.StartTime) i = 
encodeVarintDmmaster(dAtA, i, uint64(len(m.StartTime))) i-- dAtA[i] = 0x22 } if m.RemoveMeta { i-- if m.RemoveMeta { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x18 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x12 } } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *StartTaskResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *StartTaskResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StartTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.CheckResult) > 0 { i -= len(m.CheckResult) copy(dAtA[i:], m.CheckResult) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.CheckResult))) i-- dAtA[i] = 0x22 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateTaskRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateTaskRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x1a } } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x12 } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateTaskResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateTaskResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x1a } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x10 } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x12 } } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *UpdateTaskResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UpdateTaskResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *UpdateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.CheckResult) > 0 { i -= len(m.CheckResult) copy(dAtA[i:], m.CheckResult) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.CheckResult))) i-- dAtA[i] = 0x22 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *QueryStatusListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *QueryStatusListRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } func (m *QueryStatusListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x12 } } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *QueryStatusListResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *QueryStatusListResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *QueryStatusListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *ShowDDLLocksRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ShowDDLLocksRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ShowDDLLocksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if 
len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x12 } } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *DDLLock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *DDLLock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *DDLLock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Unsynced) > 0 { for iNdEx := len(m.Unsynced) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Unsynced[iNdEx]) copy(dAtA[i:], m.Unsynced[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Unsynced[iNdEx]))) i-- dAtA[i] = 0x3a } } if len(m.Synced) > 0 { for iNdEx := len(m.Synced) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Synced[iNdEx]) copy(dAtA[i:], m.Synced[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Synced[iNdEx]))) i-- dAtA[i] = 0x32 } } if len(m.DDLs) > 0 { for iNdEx := len(m.DDLs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.DDLs[iNdEx]) copy(dAtA[i:], m.DDLs[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.DDLs[iNdEx]))) i-- dAtA[i] = 0x2a } } if len(m.Owner) > 0 { i -= len(m.Owner) copy(dAtA[i:], m.Owner) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Owner))) i-- dAtA[i] = 0x22 } if len(m.Mode) > 0 { i -= len(m.Mode) copy(dAtA[i:], m.Mode) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Mode))) i-- dAtA[i] = 0x1a } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0x12 } if len(m.ID) > 0 { i -= len(m.ID) copy(dAtA[i:], m.ID) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.ID))) 
i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *ShowDDLLocksResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ShowDDLLocksResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ShowDDLLocksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Locks) > 0 { for iNdEx := len(m.Locks) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Locks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *UnlockDDLLockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UnlockDDLLockRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *UnlockDDLLockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Table) > 0 { i -= len(m.Table) copy(dAtA[i:], m.Table) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Table))) i-- dAtA[i] = 0x3a } if len(m.Database) > 0 { i -= len(m.Database) copy(dAtA[i:], m.Database) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Database))) i-- dAtA[i] = 0x32 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x2a } } if m.Op 
!= 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x20 } if m.ForceRemove { i-- if m.ForceRemove { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x18 } if len(m.ReplaceOwner) > 0 { i -= len(m.ReplaceOwner) copy(dAtA[i:], m.ReplaceOwner) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.ReplaceOwner))) i-- dAtA[i] = 0x12 } if len(m.ID) > 0 { i -= len(m.ID) copy(dAtA[i:], m.ID) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.ID))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *UnlockDDLLockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UnlockDDLLockResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *UnlockDDLLockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateWorkerRelayRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateWorkerRelayRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateWorkerRelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x12 } } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } 
return len(dAtA) - i, nil } func (m *OperateWorkerRelayResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateWorkerRelayResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateWorkerRelayResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x1a } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x10 } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *PurgeWorkerRelayRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PurgeWorkerRelayRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *PurgeWorkerRelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SubDir) > 0 { i -= len(m.SubDir) copy(dAtA[i:], m.SubDir) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.SubDir))) i-- dAtA[i] = 0x2a } if len(m.Filename) > 0 { i -= len(m.Filename) copy(dAtA[i:], m.Filename) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Filename))) i-- dAtA[i] = 0x22 } if m.Time != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Time)) i-- dAtA[i] = 0x18 } if m.Inactive { i-- if m.Inactive { dAtA[i] = 
1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x10 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *PurgeWorkerRelayResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PurgeWorkerRelayResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *PurgeWorkerRelayResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *CheckTaskRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckTaskRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *CheckTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.StartTime) > 0 { i -= len(m.StartTime) copy(dAtA[i:], m.StartTime) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.StartTime))) i-- dAtA[i] = 0x22 } if m.WarnCnt != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.WarnCnt)) i-- dAtA[i] = 0x18 } if m.ErrCnt != 0 { 
i = encodeVarintDmmaster(dAtA, i, uint64(m.ErrCnt)) i-- dAtA[i] = 0x10 } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *CheckTaskResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckTaskResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *CheckTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateSourceRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateSourceRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.WorkerName) > 0 { i -= len(m.WorkerName) copy(dAtA[i:], m.WorkerName) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.WorkerName))) i-- dAtA[i] = 0x22 } if len(m.SourceID) > 0 { for iNdEx := len(m.SourceID) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.SourceID[iNdEx]) copy(dAtA[i:], m.SourceID[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.SourceID[iNdEx]))) i-- dAtA[i] = 0x1a } } if len(m.Config) > 0 { for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Config[iNdEx]) copy(dAtA[i:], m.Config[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Config[iNdEx]))) i-- 
dAtA[i] = 0x12 } } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateSourceResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateSourceResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *RegisterWorkerRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RegisterWorkerRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RegisterWorkerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Address) > 0 { i -= len(m.Address) copy(dAtA[i:], m.Address) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Address))) i-- dAtA[i] = 0x12 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *RegisterWorkerResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RegisterWorkerResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RegisterWorkerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SecretKey) > 0 { i -= len(m.SecretKey) copy(dAtA[i:], m.SecretKey) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.SecretKey))) i-- dAtA[i] = 0x1a } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OfflineMemberRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OfflineMemberRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OfflineMemberRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x12 } if len(m.Type) > 0 { i -= len(m.Type) copy(dAtA[i:], m.Type) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *OfflineMemberResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OfflineMemberResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OfflineMemberResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 
{ i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateLeaderRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateLeaderRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateLeaderResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateLeaderResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *MasterInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *MasterInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *MasterInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.ClientURLs) > 0 { for iNdEx := 
len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.ClientURLs[iNdEx]) copy(dAtA[i:], m.ClientURLs[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.ClientURLs[iNdEx]))) i-- dAtA[i] = 0x2a } } if len(m.PeerURLs) > 0 { for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.PeerURLs[iNdEx]) copy(dAtA[i:], m.PeerURLs[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) i-- dAtA[i] = 0x22 } } if m.Alive { i-- if m.Alive { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x18 } if m.MemberID != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.MemberID)) i-- dAtA[i] = 0x10 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *WorkerInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *WorkerInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *WorkerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0x22 } if len(m.Stage) > 0 { i -= len(m.Stage) copy(dAtA[i:], m.Stage) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Stage))) i-- dAtA[i] = 0x1a } if len(m.Addr) > 0 { i -= len(m.Addr) copy(dAtA[i:], m.Addr) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Addr))) i-- dAtA[i] = 0x12 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *ListLeaderMember) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return 
dAtA[:n], nil } func (m *ListLeaderMember) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ListLeaderMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Addr) > 0 { i -= len(m.Addr) copy(dAtA[i:], m.Addr) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Addr))) i-- dAtA[i] = 0x1a } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x12 } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *ListMasterMember) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ListMasterMember) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ListMasterMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Masters) > 0 { for iNdEx := len(m.Masters) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Masters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *ListWorkerMember) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ListWorkerMember) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ListWorkerMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l 
int _ = l if len(m.Workers) > 0 { for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *Members) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Members) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Members) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Member != nil { { size := m.Member.Size() i -= size if _, err := m.Member.MarshalTo(dAtA[i:]); err != nil { return 0, err } } } return len(dAtA) - i, nil } func (m *Members_Leader) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Members_Leader) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Leader != nil { { size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *Members_Master) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Members_Master) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Master != nil { { size, err := m.Master.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } return len(dAtA) - i, nil } func (m *Members_Worker) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } 
func (m *Members_Worker) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Worker != nil { { size, err := m.Worker.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } return len(dAtA) - i, nil } func (m *ListMemberRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ListMemberRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ListMemberRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Names) > 0 { for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Names[iNdEx]) copy(dAtA[i:], m.Names[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Names[iNdEx]))) i-- dAtA[i] = 0x22 } } if m.Worker { i-- if m.Worker { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x18 } if m.Master { i-- if m.Master { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x10 } if m.Leader { i-- if m.Leader { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *ListMemberResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ListMemberResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ListMemberResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Members) > 0 { for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { 
i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateSchemaRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateSchemaRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.FromTarget { i-- if m.FromTarget { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x50 } if m.FromSource { i-- if m.FromSource { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x48 } if m.Sync { i-- if m.Sync { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x40 } if m.Flush { i-- if m.Flush { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x38 } if len(m.Schema) > 0 { i -= len(m.Schema) copy(dAtA[i:], m.Schema) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Schema))) i-- dAtA[i] = 0x32 } if len(m.Table) > 0 { i -= len(m.Table) copy(dAtA[i:], m.Table) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Table))) i-- dAtA[i] = 0x2a } if len(m.Database) > 0 { i -= len(m.Database) copy(dAtA[i:], m.Database) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Database))) i-- dAtA[i] = 0x22 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x1a } } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0x12 } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateSchemaResponse) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateSchemaResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateSchemaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *GetSubTaskCfgRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetSubTaskCfgRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetSubTaskCfgRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *GetSubTaskCfgResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetSubTaskCfgResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetSubTaskCfgResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := 
len(dAtA) _ = i var l int _ = l if len(m.Cfgs) > 0 { for iNdEx := len(m.Cfgs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Cfgs[iNdEx]) copy(dAtA[i:], m.Cfgs[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Cfgs[iNdEx]))) i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *GetCfgRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetCfgRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetCfgRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x12 } if m.Type != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Type)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *GetCfgResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetCfgResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetCfgResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Cfg) > 0 { i -= len(m.Cfg) copy(dAtA[i:], m.Cfg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Cfg))) i-- dAtA[i] = 0x1a } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) 
- i, nil } func (m *GetMasterCfgRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetMasterCfgRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetMasterCfgRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *GetMasterCfgResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetMasterCfgResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetMasterCfgResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Cfg) > 0 { i -= len(m.Cfg) copy(dAtA[i:], m.Cfg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Cfg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *HandleErrorRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *HandleErrorRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *HandleErrorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sqls) > 0 { for iNdEx := len(m.Sqls) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sqls[iNdEx]) copy(dAtA[i:], m.Sqls[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sqls[iNdEx]))) i-- dAtA[i] = 0x2a } } if len(m.BinlogPos) > 0 { i -= len(m.BinlogPos) copy(dAtA[i:], m.BinlogPos) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.BinlogPos))) i-- dAtA[i] = 0x22 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 
1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x1a } } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0x12 } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *HandleErrorResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *HandleErrorResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *HandleErrorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *TransferSourceRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TransferSourceRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *TransferSourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Worker) > 0 { i -= len(m.Worker) copy(dAtA[i:], m.Worker) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Worker))) i-- dAtA[i] = 0x12 } if 
len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *TransferSourceResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TransferSourceResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *TransferSourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateRelayRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateRelayRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateRelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Worker) > 0 { for iNdEx := len(m.Worker) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Worker[iNdEx]) copy(dAtA[i:], m.Worker[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Worker[iNdEx]))) i-- dAtA[i] = 0x1a } } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0x12 } if m.Op != 0 { i = encodeVarintDmmaster(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateRelayResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { 
return nil, err } return dAtA[:n], nil } func (m *OperateRelayResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateRelayResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *StartValidationRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *StartValidationRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StartValidationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.TaskName) > 0 { i -= len(m.TaskName) copy(dAtA[i:], m.TaskName) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.TaskName))) i-- dAtA[i] = 0x22 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x1a } } if m.StartTime != nil { { size := m.StartTime.Size() i -= size if _, err := m.StartTime.MarshalTo(dAtA[i:]); err != nil { return 0, err } } } if m.Mode != nil { { size := m.Mode.Size() i -= size if _, err := m.Mode.MarshalTo(dAtA[i:]); err != nil { return 0, err } } } return len(dAtA) - i, nil } func (m *StartValidationRequest_ModeValue) MarshalTo(dAtA 
[]byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StartValidationRequest_ModeValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) i -= len(m.ModeValue) copy(dAtA[i:], m.ModeValue) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.ModeValue))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *StartValidationRequest_StartTimeValue) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StartValidationRequest_StartTimeValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) i -= len(m.StartTimeValue) copy(dAtA[i:], m.StartTimeValue) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.StartTimeValue))) i-- dAtA[i] = 0x12 return len(dAtA) - i, nil } func (m *StartValidationResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *StartValidationResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StartValidationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *StopValidationRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m 
*StopValidationRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StopValidationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.TaskName) > 0 { i -= len(m.TaskName) copy(dAtA[i:], m.TaskName) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.TaskName))) i-- dAtA[i] = 0x12 } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *StopValidationResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *StopValidationResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *StopValidationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *UpdateValidationRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UpdateValidationRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m 
*UpdateValidationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.BinlogGTID) > 0 { i -= len(m.BinlogGTID) copy(dAtA[i:], m.BinlogGTID) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.BinlogGTID))) i-- dAtA[i] = 0x22 } if len(m.BinlogPos) > 0 { i -= len(m.BinlogPos) copy(dAtA[i:], m.BinlogPos) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.BinlogPos))) i-- dAtA[i] = 0x1a } if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sources[iNdEx]) copy(dAtA[i:], m.Sources[iNdEx]) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Sources[iNdEx]))) i-- dAtA[i] = 0x12 } } if len(m.TaskName) > 0 { i -= len(m.TaskName) copy(dAtA[i:], m.TaskName) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.TaskName))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *UpdateValidationResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UpdateValidationResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *UpdateValidationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sources) > 0 { for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmmaster(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *EncryptRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err 
} return dAtA[:n], nil } func (m *EncryptRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *EncryptRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Plaintext) > 0 { i -= len(m.Plaintext) copy(dAtA[i:], m.Plaintext) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Plaintext))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *EncryptResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *EncryptResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *EncryptResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Ciphertext) > 0 { i -= len(m.Ciphertext) copy(dAtA[i:], m.Ciphertext) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Ciphertext))) i-- dAtA[i] = 0x1a } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *ListTaskConfigsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ListTaskConfigsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ListTaskConfigsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.TaskConfigs) > 0 { for k := range m.TaskConfigs { v := m.TaskConfigs[k] baseI := i i -= len(v) copy(dAtA[i:], v) i = encodeVarintDmmaster(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 i -= len(k) copy(dAtA[i:], k) i = 
encodeVarintDmmaster(dAtA, i, uint64(len(k))) i-- dAtA[i] = 0xa i = encodeVarintDmmaster(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *ListSourceConfigsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ListSourceConfigsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ListSourceConfigsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SourceConfigs) > 0 { for k := range m.SourceConfigs { v := m.SourceConfigs[k] baseI := i i -= len(v) copy(dAtA[i:], v) i = encodeVarintDmmaster(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 i -= len(k) copy(dAtA[i:], k) i = encodeVarintDmmaster(dAtA, i, uint64(len(k))) i-- dAtA[i] = 0xa i = encodeVarintDmmaster(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmmaster(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func encodeVarintDmmaster(dAtA []byte, offset int, v uint64) int { offset -= sovDmmaster(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *StartTaskRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Task) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } if m.RemoveMeta { n += 2 } l = len(m.StartTime) 
if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *StartTaskResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.CheckResult) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OperateTaskRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *OperateTaskResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *UpdateTaskRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Task) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *UpdateTaskResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.CheckResult) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *QueryStatusListRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m 
*QueryStatusListResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *ShowDDLLocksRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Task) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *DDLLock) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.ID) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Task) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Mode) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Owner) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.DDLs) > 0 { for _, s := range m.DDLs { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } if len(m.Synced) > 0 { for _, s := range m.Synced { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } if len(m.Unsynced) > 0 { for _, s := range m.Unsynced { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *ShowDDLLocksResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Locks) > 0 { for _, e := range m.Locks { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *UnlockDDLLockRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.ID) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.ReplaceOwner) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if m.ForceRemove { n += 2 } if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.Database) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Table) 
if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *UnlockDDLLockResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OperateWorkerRelayRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *OperateWorkerRelayResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *PurgeWorkerRelayRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } if m.Inactive { n += 2 } if m.Time != 0 { n += 1 + sovDmmaster(uint64(m.Time)) } l = len(m.Filename) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.SubDir) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *PurgeWorkerRelayResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *CheckTaskRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Task) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if m.ErrCnt != 0 { n += 1 + sovDmmaster(uint64(m.ErrCnt)) } if m.WarnCnt != 0 { n += 1 + sovDmmaster(uint64(m.WarnCnt)) } l = len(m.StartTime) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *CheckTaskResponse) Size() (n int) { if m == nil { 
return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OperateSourceRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } if len(m.Config) > 0 { for _, s := range m.Config { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } if len(m.SourceID) > 0 { for _, s := range m.SourceID { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.WorkerName) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OperateSourceResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *RegisterWorkerRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Address) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *RegisterWorkerResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.SecretKey) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OfflineMemberRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Type) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OfflineMemberResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OperateLeaderRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } return n } func (m *OperateLeaderResponse) Size() (n int) { if m == nil { return 0 } var l int 
_ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *MasterInfo) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if m.MemberID != 0 { n += 1 + sovDmmaster(uint64(m.MemberID)) } if m.Alive { n += 2 } if len(m.PeerURLs) > 0 { for _, s := range m.PeerURLs { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } if len(m.ClientURLs) > 0 { for _, s := range m.ClientURLs { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *WorkerInfo) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Addr) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Stage) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Source) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *ListLeaderMember) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Addr) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *ListMasterMember) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Masters) > 0 { for _, e := range m.Masters { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *ListWorkerMember) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Workers) > 0 { for _, e := range m.Workers { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *Members) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Member != nil { n += m.Member.Size() } return n } func (m *Members_Leader) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Leader != nil { l = m.Leader.Size() n += 
1 + l + sovDmmaster(uint64(l)) } return n } func (m *Members_Master) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Master != nil { l = m.Master.Size() n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *Members_Worker) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Worker != nil { l = m.Worker.Size() n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *ListMemberRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Leader { n += 2 } if m.Master { n += 2 } if m.Worker { n += 2 } if len(m.Names) > 0 { for _, s := range m.Names { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *ListMemberResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Members) > 0 { for _, e := range m.Members { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *OperateSchemaRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } l = len(m.Task) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.Database) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Table) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Schema) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if m.Flush { n += 2 } if m.Sync { n += 2 } if m.FromSource { n += 2 } if m.FromTarget { n += 2 } return n } func (m *OperateSchemaResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *GetSubTaskCfgRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + 
sovDmmaster(uint64(l)) } return n } func (m *GetSubTaskCfgResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Cfgs) > 0 { for _, s := range m.Cfgs { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *GetCfgRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Type != 0 { n += 1 + sovDmmaster(uint64(m.Type)) } l = len(m.Name) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *GetCfgResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Cfg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *GetMasterCfgRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *GetMasterCfgResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Cfg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *HandleErrorRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } l = len(m.Task) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.BinlogPos) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sqls) > 0 { for _, s := range m.Sqls { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *HandleErrorResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *TransferSourceRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Source) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = 
len(m.Worker) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *TransferSourceResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *OperateRelayRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmmaster(uint64(m.Op)) } l = len(m.Source) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Worker) > 0 { for _, s := range m.Worker { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *OperateRelayResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *StartValidationRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Mode != nil { n += m.Mode.Size() } if m.StartTime != nil { n += m.StartTime.Size() } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.TaskName) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *StartValidationRequest_ModeValue) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.ModeValue) n += 1 + l + sovDmmaster(uint64(l)) return n } func (m *StartValidationRequest_StartTimeValue) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.StartTimeValue) n += 1 + l + sovDmmaster(uint64(l)) return n } func (m *StartValidationResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *StopValidationRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Sources) > 
0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.TaskName) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *StopValidationResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *UpdateValidationRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.TaskName) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, s := range m.Sources { l = len(s) n += 1 + l + sovDmmaster(uint64(l)) } } l = len(m.BinlogPos) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.BinlogGTID) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *UpdateValidationResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.Sources) > 0 { for _, e := range m.Sources { l = e.Size() n += 1 + l + sovDmmaster(uint64(l)) } } return n } func (m *EncryptRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Plaintext) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *EncryptResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } l = len(m.Ciphertext) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } return n } func (m *ListTaskConfigsResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.TaskConfigs) > 0 { for k, v := range m.TaskConfigs { _ = k _ = v mapEntrySize := 1 + len(k) + sovDmmaster(uint64(len(k))) + 1 + len(v) + sovDmmaster(uint64(len(v))) n += mapEntrySize + 1 + 
sovDmmaster(uint64(mapEntrySize)) } } return n } func (m *ListSourceConfigsResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmmaster(uint64(l)) } if len(m.SourceConfigs) > 0 { for k, v := range m.SourceConfigs { _ = k _ = v mapEntrySize := 1 + len(k) + sovDmmaster(uint64(len(k))) + 1 + len(v) + sovDmmaster(uint64(len(v))) n += mapEntrySize + 1 + sovDmmaster(uint64(mapEntrySize)) } } return n } func sovDmmaster(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozDmmaster(x uint64) (n int) { return sovDmmaster(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *StartTaskRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StartTaskRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: StartTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field 
Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RemoveMeta", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.RemoveMeta = bool(v != 0) case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.StartTime = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *StartTaskResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StartTaskResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: StartTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, 
&CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CheckResult", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.CheckResult = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateTaskRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateTaskRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= 
TaskOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateTaskResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) 
wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateTaskResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= TaskOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return 
ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UpdateTaskResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UpdateTaskResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UpdateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b 
:= dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CheckResult", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.CheckResult = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m 
*QueryStatusListRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: QueryStatusListRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: QueryStatusListRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return 
err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *QueryStatusListResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: QueryStatusListResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: QueryStatusListResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 
{ return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &QueryStatusResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ShowDDLLocksRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ShowDDLLocksRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ShowDDLLocksRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return 
io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DDLLock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DDLLock: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DDLLock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := 
int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Mode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Owner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DDLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.DDLs = append(m.DDLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Synced", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Synced = append(m.Synced, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Unsynced", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Unsynced = 
append(m.Unsynced, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ShowDDLLocksResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ShowDDLLocksResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ShowDDLLocksResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType 
!= 2 { return fmt.Errorf("proto: wrong wireType = %d for field Locks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Locks = append(m.Locks, &DDLLock{}) if err := m.Locks[len(m.Locks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UnlockDDLLockRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UnlockDDLLockRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UnlockDDLLockRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return 
ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ReplaceOwner", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.ReplaceOwner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ForceRemove", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.ForceRemove = bool(v != 0) case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= UnlockDDLLockOp(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen 
if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Database = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Table = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UnlockDDLLockResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF 
} b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UnlockDDLLockResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UnlockDDLLockResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateWorkerRelayRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateWorkerRelayRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateWorkerRelayRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= RelayOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateWorkerRelayResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType 
== 4 { return fmt.Errorf("proto: OperateWorkerRelayResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateWorkerRelayResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= RelayOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > 
l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PurgeWorkerRelayRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PurgeWorkerRelayRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PurgeWorkerRelayRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Inactive", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Inactive = bool(v != 0) case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } m.Time = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Time |= int64(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Filename = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SubDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.SubDir = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if 
iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PurgeWorkerRelayResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PurgeWorkerRelayResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PurgeWorkerRelayResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break 
} } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckTaskRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckTaskRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field 
ErrCnt", wireType) } m.ErrCnt = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ErrCnt |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field WarnCnt", wireType) } m.WarnCnt = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.WarnCnt |= int64(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.StartTime = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckTaskResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckTaskResponse: wiretype end 
group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateSourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateSourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateSourceRequest: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= SourceOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Config = append(m.Config, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SourceID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.SourceID = append(m.SourceID, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field WorkerName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << 
shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.WorkerName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateSourceResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateSourceResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 
0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RegisterWorkerRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RegisterWorkerRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RegisterWorkerRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Address = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RegisterWorkerResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RegisterWorkerResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RegisterWorkerResponse: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SecretKey", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.SecretKey = append(m.SecretKey[:0], dAtA[iNdEx:postIndex]...) 
if m.SecretKey == nil { m.SecretKey = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OfflineMemberRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OfflineMemberRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OfflineMemberRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := 
int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OfflineMemberResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OfflineMemberResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OfflineMemberResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := 
iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateLeaderRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateLeaderRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= LeaderOp(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateLeaderResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { 
return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateLeaderResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *MasterInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 
{ break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: MasterInfo: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: MasterInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType) } m.MemberID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.MemberID |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Alive", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Alive = bool(v != 0) case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return 
ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *WorkerInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: WorkerInfo: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: WorkerInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Addr = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Stage = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 
0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ListLeaderMember) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ListLeaderMember: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ListLeaderMember: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Addr = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ListMasterMember) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ListMasterMember: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ListMasterMember: illegal tag %d (wire type %d)", 
fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Masters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Masters = append(m.Masters, &MasterInfo{}) if err := m.Masters[len(m.Masters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ListWorkerMember) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 
3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ListWorkerMember: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ListWorkerMember: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Workers = append(m.Workers, &WorkerInfo{}) if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Members) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); 
; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Members: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Members: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } v := &ListLeaderMember{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Member = &Members_Leader{v} iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Master", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } v := &ListMasterMember{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Member = &Members_Master{v} iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } v := &ListWorkerMember{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Member = &Members_Worker{v} iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ListMemberRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ListMemberRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ListMemberRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Leader = bool(v != 0) case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Master", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << 
shift if b < 0x80 { break } } m.Master = bool(v != 0) case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Worker = bool(v != 0) case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ListMemberResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ListMemberResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ListMemberResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType 
!= 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Members = append(m.Members, &Members{}) if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateSchemaRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 
for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= SchemaOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Database = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Table = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Schema = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Flush = bool(v != 0) case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Sync = bool(v != 0) case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FromSource", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.FromSource = bool(v != 0) case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FromTarget", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.FromTarget = bool(v != 0) default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateSchemaResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetSubTaskCfgRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetSubTaskCfgRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetSubTaskCfgRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetSubTaskCfgResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx 
:= 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetSubTaskCfgResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetSubTaskCfgResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cfgs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex 
< 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Cfgs = append(m.Cfgs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetCfgRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetCfgRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetCfgRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Type |= CfgType(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetCfgResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetCfgResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetCfgResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong 
wireType = %d for field Cfg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Cfg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetMasterCfgRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetMasterCfgRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetMasterCfgRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetMasterCfgResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetMasterCfgResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetMasterCfgResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cfg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Cfg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *HandleErrorRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HandleErrorRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HandleErrorRequest: illegal tag %d (wire type %d)", fieldNum, wire) } 
switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= ErrorOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BinlogPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 
{ return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.BinlogPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sqls", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sqls = append(m.Sqls, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *HandleErrorResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HandleErrorResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HandleErrorResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *TransferSourceRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if 
iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TransferSourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TransferSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Worker = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m 
*TransferSourceResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TransferSourceResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TransferSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateRelayRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for 
iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateRelayRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateRelayRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= RelayOpV2(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Worker = append(m.Worker, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateRelayResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateRelayResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateRelayResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return 
io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *StartValidationRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StartValidationRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: StartValidationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ModeValue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ 
stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Mode = &StartValidationRequest_ModeValue{string(dAtA[iNdEx:postIndex])} iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field StartTimeValue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.StartTime = &StartValidationRequest_StartTimeValue{string(dAtA[iNdEx:postIndex])} iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.TaskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *StartValidationResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StartValidationResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: StartValidationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := 
int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *StopValidationRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StopValidationRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: StopValidationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } 
var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.TaskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *StopValidationResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: StopValidationResponse: wiretype end group for non-group") } if fieldNum <= 
0 { return fmt.Errorf("proto: StopValidationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += 
skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UpdateValidationRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UpdateValidationRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UpdateValidationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.TaskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 
3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BinlogPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.BinlogPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BinlogGTID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.BinlogGTID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UpdateValidationResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return 
fmt.Errorf("proto: UpdateValidationResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UpdateValidationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Sources = append(m.Sources, &CommonWorkerResponse{}) if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { 
return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *EncryptRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: EncryptRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: EncryptRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Plaintext", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Plaintext = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *EncryptResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := 
dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: EncryptResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: EncryptResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ciphertext", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Ciphertext = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ListTaskConfigsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ListTaskConfigsResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ListTaskConfigsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskConfigs", wireType) } var msglen 
int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } if m.TaskConfigs == nil { m.TaskConfigs = make(map[string]string) } var mapkey string var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthDmmaster } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey < 0 { return ErrInvalidLengthDmmaster } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLenmapvalue := int(stringLenmapvalue) if intStringLenmapvalue < 0 { return ErrInvalidLengthDmmaster } postStringIndexmapvalue := iNdEx + intStringLenmapvalue if postStringIndexmapvalue < 0 { return ErrInvalidLengthDmmaster } if postStringIndexmapvalue > l { return 
io.ErrUnexpectedEOF } mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.TaskConfigs[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ListSourceConfigsResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ListSourceConfigsResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ListSourceConfigsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] 
iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SourceConfigs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmmaster } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmmaster } if postIndex > l { return io.ErrUnexpectedEOF } if m.SourceConfigs == nil { m.SourceConfigs = make(map[string]string) } var mapkey string var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthDmmaster } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey < 0 { return ErrInvalidLengthDmmaster } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 
{ if shift >= 64 { return ErrIntOverflowDmmaster } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLenmapvalue := int(stringLenmapvalue) if intStringLenmapvalue < 0 { return ErrInvalidLengthDmmaster } postStringIndexmapvalue := iNdEx + intStringLenmapvalue if postStringIndexmapvalue < 0 { return ErrInvalidLengthDmmaster } if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.SourceConfigs[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmmaster(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmmaster } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipDmmaster(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowDmmaster } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowDmmaster } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowDmmaster } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << 
shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthDmmaster } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupDmmaster } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthDmmaster } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthDmmaster = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowDmmaster = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupDmmaster = fmt.Errorf("proto: unexpected end of group") ) tiup-1.16.3/pkg/cluster/api/dmpb/dmworker.pb.go000066400000000000000000013543671505422223000213170ustar00rootroot00000000000000// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: dmworker.proto package dmpb import ( context "context" encoding_binary "encoding/binary" fmt "fmt" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type TaskOp int32 const ( TaskOp_InvalidOp TaskOp = 0 TaskOp_Stop TaskOp = 1 TaskOp_Pause TaskOp = 2 TaskOp_Resume TaskOp = 3 TaskOp_Start TaskOp = 4 TaskOp_Update TaskOp = 5 TaskOp_AutoResume TaskOp = 6 TaskOp_Delete TaskOp = 7 ) var TaskOp_name = map[int32]string{ 0: "InvalidOp", 1: "Stop", 2: "Pause", 3: "Resume", 4: "Start", 5: "Update", 6: "AutoResume", 7: "Delete", } var TaskOp_value = map[string]int32{ "InvalidOp": 0, "Stop": 1, "Pause": 2, "Resume": 3, "Start": 4, "Update": 5, "AutoResume": 6, "Delete": 7, } func (x TaskOp) String() string { return proto.EnumName(TaskOp_name, int32(x)) } func (TaskOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{0} } // Stage represents current stage for a (sub) task // a (sub) task should be always in one stage of the following stages // (sub) task can transfer from on stage to some special other stages // New: initial stage when a sub task is created // // can not be transferred from other stages // transfers to Running when initialize with no error // // Running: indicates the sub task is processing // // is transferred from New when created successfully // is transferred from Paused when resuming is requested // transfers to Paused when error occurred or requested from external // transfers to Stopped when requested from external // transfers to Finished when sub task processing completed (no Syncer used) // // Paused: indicates the processing is paused, and can be resume from external request // // is transferred from Running when error occurred or requested from external // transfers to Running when resuming is requested from external // transfers to Stopped when requested from external // // Stopped: indicates the processing is stopped, and can not be resume (or re-run) again // // is transferred from Running / Paused when requested from external // can not transfer to any stages // // Finished: 
indicates the processing is finished, and no need to re-run // // is transferred from Running when processing completed // should not transfer to any stages type Stage int32 const ( Stage_InvalidStage Stage = 0 Stage_New Stage = 1 Stage_Running Stage = 2 Stage_Paused Stage = 3 Stage_Stopped Stage = 4 Stage_Finished Stage = 5 Stage_Pausing Stage = 6 Stage_Resuming Stage = 7 Stage_Stopping Stage = 8 ) var Stage_name = map[int32]string{ 0: "InvalidStage", 1: "New", 2: "Running", 3: "Paused", 4: "Stopped", 5: "Finished", 6: "Pausing", 7: "Resuming", 8: "Stopping", } var Stage_value = map[string]int32{ "InvalidStage": 0, "New": 1, "Running": 2, "Paused": 3, "Stopped": 4, "Finished": 5, "Pausing": 6, "Resuming": 7, "Stopping": 8, } func (x Stage) String() string { return proto.EnumName(Stage_name, int32(x)) } func (Stage) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{1} } // UnitType represents the dm unit's type type UnitType int32 const ( UnitType_InvalidUnit UnitType = 0 UnitType_Check UnitType = 1 UnitType_Dump UnitType = 2 UnitType_Load UnitType = 3 UnitType_Sync UnitType = 4 UnitType_Relay UnitType = 100 ) var UnitType_name = map[int32]string{ 0: "InvalidUnit", 1: "Check", 2: "Dump", 3: "Load", 4: "Sync", 100: "Relay", } var UnitType_value = map[string]int32{ "InvalidUnit": 0, "Check": 1, "Dump": 2, "Load": 3, "Sync": 4, "Relay": 100, } func (x UnitType) String() string { return proto.EnumName(UnitType_name, int32(x)) } func (UnitType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{2} } // RelayOp differs from TaskOp type RelayOp int32 const ( RelayOp_InvalidRelayOp RelayOp = 0 RelayOp_StopRelay RelayOp = 1 RelayOp_PauseRelay RelayOp = 2 RelayOp_ResumeRelay RelayOp = 3 ) var RelayOp_name = map[int32]string{ 0: "InvalidRelayOp", 1: "StopRelay", 2: "PauseRelay", 3: "ResumeRelay", } var RelayOp_value = map[string]int32{ "InvalidRelayOp": 0, "StopRelay": 1, "PauseRelay": 2, "ResumeRelay": 3, } func (x 
RelayOp) String() string { return proto.EnumName(RelayOp_name, int32(x)) } func (RelayOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{3} } type SchemaOp int32 const ( SchemaOp_InvalidSchemaOp SchemaOp = 0 SchemaOp_GetSchema SchemaOp = 1 SchemaOp_SetSchema SchemaOp = 2 SchemaOp_RemoveSchema SchemaOp = 3 SchemaOp_ListSchema SchemaOp = 4 SchemaOp_ListTable SchemaOp = 5 SchemaOp_ListMigrateTargets SchemaOp = 6 ) var SchemaOp_name = map[int32]string{ 0: "InvalidSchemaOp", 1: "GetSchema", 2: "SetSchema", 3: "RemoveSchema", 4: "ListSchema", 5: "ListTable", 6: "ListMigrateTargets", } var SchemaOp_value = map[string]int32{ "InvalidSchemaOp": 0, "GetSchema": 1, "SetSchema": 2, "RemoveSchema": 3, "ListSchema": 4, "ListTable": 5, "ListMigrateTargets": 6, } func (x SchemaOp) String() string { return proto.EnumName(SchemaOp_name, int32(x)) } func (SchemaOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{4} } type V1MetaOp int32 const ( V1MetaOp_InvalidV1MetaOp V1MetaOp = 0 V1MetaOp_GetV1Meta V1MetaOp = 1 V1MetaOp_RemoveV1Meta V1MetaOp = 2 ) var V1MetaOp_name = map[int32]string{ 0: "InvalidV1MetaOp", 1: "GetV1Meta", 2: "RemoveV1Meta", } var V1MetaOp_value = map[string]int32{ "InvalidV1MetaOp": 0, "GetV1Meta": 1, "RemoveV1Meta": 2, } func (x V1MetaOp) String() string { return proto.EnumName(V1MetaOp_name, int32(x)) } func (V1MetaOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{5} } type ErrorOp int32 const ( ErrorOp_InvalidErrorOp ErrorOp = 0 ErrorOp_Skip ErrorOp = 1 ErrorOp_Replace ErrorOp = 2 ErrorOp_Revert ErrorOp = 3 ErrorOp_Inject ErrorOp = 4 ErrorOp_List ErrorOp = 5 ) var ErrorOp_name = map[int32]string{ 0: "InvalidErrorOp", 1: "Skip", 2: "Replace", 3: "Revert", 4: "Inject", 5: "List", } var ErrorOp_value = map[string]int32{ "InvalidErrorOp": 0, "Skip": 1, "Replace": 2, "Revert": 3, "Inject": 4, "List": 5, } func (x ErrorOp) String() string { return 
proto.EnumName(ErrorOp_name, int32(x)) } func (ErrorOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{6} } type ValidatorOp int32 const ( ValidatorOp_InvalidValidatorOp ValidatorOp = 0 ValidatorOp_StartValidator ValidatorOp = 1 ValidatorOp_StopValidator ValidatorOp = 2 ) var ValidatorOp_name = map[int32]string{ 0: "InvalidValidatorOp", 1: "StartValidator", 2: "StopValidator", } var ValidatorOp_value = map[string]int32{ "InvalidValidatorOp": 0, "StartValidator": 1, "StopValidator": 2, } func (x ValidatorOp) String() string { return proto.EnumName(ValidatorOp_name, int32(x)) } func (ValidatorOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{7} } type ValidateErrorState int32 const ( ValidateErrorState_InvalidErr ValidateErrorState = 0 ValidateErrorState_NewErr ValidateErrorState = 1 ValidateErrorState_IgnoredErr ValidateErrorState = 2 ValidateErrorState_ResolvedErr ValidateErrorState = 3 ) var ValidateErrorState_name = map[int32]string{ 0: "InvalidErr", 1: "NewErr", 2: "IgnoredErr", 3: "ResolvedErr", } var ValidateErrorState_value = map[string]int32{ "InvalidErr": 0, "NewErr": 1, "IgnoredErr": 2, "ResolvedErr": 3, } func (x ValidateErrorState) String() string { return proto.EnumName(ValidateErrorState_name, int32(x)) } func (ValidateErrorState) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{8} } type ValidationErrOp int32 const ( ValidationErrOp_InvalidErrOp ValidationErrOp = 0 ValidationErrOp_IgnoreErrOp ValidationErrOp = 1 ValidationErrOp_ResolveErrOp ValidationErrOp = 2 ValidationErrOp_ClearErrOp ValidationErrOp = 3 ) var ValidationErrOp_name = map[int32]string{ 0: "InvalidErrOp", 1: "IgnoreErrOp", 2: "ResolveErrOp", 3: "ClearErrOp", } var ValidationErrOp_value = map[string]int32{ "InvalidErrOp": 0, "IgnoreErrOp": 1, "ResolveErrOp": 2, "ClearErrOp": 3, } func (x ValidationErrOp) String() string { return proto.EnumName(ValidationErrOp_name, int32(x)) } func 
(ValidationErrOp) EnumDescriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{9} } type QueryStatusRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (m *QueryStatusRequest) Reset() { *m = QueryStatusRequest{} } func (m *QueryStatusRequest) String() string { return proto.CompactTextString(m) } func (*QueryStatusRequest) ProtoMessage() {} func (*QueryStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{0} } func (m *QueryStatusRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *QueryStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_QueryStatusRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *QueryStatusRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryStatusRequest.Merge(m, src) } func (m *QueryStatusRequest) XXX_Size() int { return m.Size() } func (m *QueryStatusRequest) XXX_DiscardUnknown() { xxx_messageInfo_QueryStatusRequest.DiscardUnknown(m) } var xxx_messageInfo_QueryStatusRequest proto.InternalMessageInfo func (m *QueryStatusRequest) GetName() string { if m != nil { return m.Name } return "" } type CommonWorkerResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Source string `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"` Worker string `protobuf:"bytes,4,opt,name=worker,proto3" json:"worker,omitempty"` } func (m *CommonWorkerResponse) Reset() { *m = CommonWorkerResponse{} } func (m *CommonWorkerResponse) String() string { return proto.CompactTextString(m) } func (*CommonWorkerResponse) ProtoMessage() {} func (*CommonWorkerResponse) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, 
[]int{1} } func (m *CommonWorkerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CommonWorkerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CommonWorkerResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *CommonWorkerResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CommonWorkerResponse.Merge(m, src) } func (m *CommonWorkerResponse) XXX_Size() int { return m.Size() } func (m *CommonWorkerResponse) XXX_DiscardUnknown() { xxx_messageInfo_CommonWorkerResponse.DiscardUnknown(m) } var xxx_messageInfo_CommonWorkerResponse proto.InternalMessageInfo func (m *CommonWorkerResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *CommonWorkerResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *CommonWorkerResponse) GetSource() string { if m != nil { return m.Source } return "" } func (m *CommonWorkerResponse) GetWorker() string { if m != nil { return m.Worker } return "" } // QueryStatusResponse represents status response for query on a dm-worker // status: dm-worker's current sub tasks' status type QueryStatusResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` SourceStatus *SourceStatus `protobuf:"bytes,3,opt,name=sourceStatus,proto3" json:"sourceStatus,omitempty"` SubTaskStatus []*SubTaskStatus `protobuf:"bytes,4,rep,name=subTaskStatus,proto3" json:"subTaskStatus,omitempty"` } func (m *QueryStatusResponse) Reset() { *m = QueryStatusResponse{} } func (m *QueryStatusResponse) String() string { return proto.CompactTextString(m) } func (*QueryStatusResponse) ProtoMessage() {} func (*QueryStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{2} } func (m 
*QueryStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *QueryStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_QueryStatusResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *QueryStatusResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_QueryStatusResponse.Merge(m, src) } func (m *QueryStatusResponse) XXX_Size() int { return m.Size() } func (m *QueryStatusResponse) XXX_DiscardUnknown() { xxx_messageInfo_QueryStatusResponse.DiscardUnknown(m) } var xxx_messageInfo_QueryStatusResponse proto.InternalMessageInfo func (m *QueryStatusResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *QueryStatusResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *QueryStatusResponse) GetSourceStatus() *SourceStatus { if m != nil { return m.SourceStatus } return nil } func (m *QueryStatusResponse) GetSubTaskStatus() []*SubTaskStatus { if m != nil { return m.SubTaskStatus } return nil } // CheckStatus represents status for check unit // adds fields later type CheckStatus struct { Passed bool `protobuf:"varint,1,opt,name=passed,proto3" json:"passed,omitempty"` Total int32 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"` Successful int32 `protobuf:"varint,3,opt,name=successful,proto3" json:"successful,omitempty"` Failed int32 `protobuf:"varint,4,opt,name=failed,proto3" json:"failed,omitempty"` Warning int32 `protobuf:"varint,5,opt,name=warning,proto3" json:"warning,omitempty"` Detail []byte `protobuf:"bytes,6,opt,name=detail,proto3" json:"detail,omitempty"` } func (m *CheckStatus) Reset() { *m = CheckStatus{} } func (m *CheckStatus) String() string { return proto.CompactTextString(m) } func (*CheckStatus) ProtoMessage() {} func (*CheckStatus) Descriptor() ([]byte, []int) { return 
fileDescriptor_51a1b9e17fd67b10, []int{3} } func (m *CheckStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *CheckStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckStatus.Merge(m, src) } func (m *CheckStatus) XXX_Size() int { return m.Size() } func (m *CheckStatus) XXX_DiscardUnknown() { xxx_messageInfo_CheckStatus.DiscardUnknown(m) } var xxx_messageInfo_CheckStatus proto.InternalMessageInfo func (m *CheckStatus) GetPassed() bool { if m != nil { return m.Passed } return false } func (m *CheckStatus) GetTotal() int32 { if m != nil { return m.Total } return 0 } func (m *CheckStatus) GetSuccessful() int32 { if m != nil { return m.Successful } return 0 } func (m *CheckStatus) GetFailed() int32 { if m != nil { return m.Failed } return 0 } func (m *CheckStatus) GetWarning() int32 { if m != nil { return m.Warning } return 0 } func (m *CheckStatus) GetDetail() []byte { if m != nil { return m.Detail } return nil } // DumpStatus represents status for dump unit // add fields later type DumpStatus struct { TotalTables int64 `protobuf:"varint,1,opt,name=totalTables,proto3" json:"totalTables,omitempty"` CompletedTables float64 `protobuf:"fixed64,2,opt,name=completedTables,proto3" json:"completedTables,omitempty"` FinishedBytes float64 `protobuf:"fixed64,3,opt,name=finishedBytes,proto3" json:"finishedBytes,omitempty"` FinishedRows float64 `protobuf:"fixed64,4,opt,name=finishedRows,proto3" json:"finishedRows,omitempty"` EstimateTotalRows float64 `protobuf:"fixed64,5,opt,name=estimateTotalRows,proto3" json:"estimateTotalRows,omitempty"` Bps int64 `protobuf:"varint,6,opt,name=bps,proto3" json:"bps,omitempty"` Progress string `protobuf:"bytes,7,opt,name=progress,proto3" 
json:"progress,omitempty"` } func (m *DumpStatus) Reset() { *m = DumpStatus{} } func (m *DumpStatus) String() string { return proto.CompactTextString(m) } func (*DumpStatus) ProtoMessage() {} func (*DumpStatus) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{4} } func (m *DumpStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *DumpStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_DumpStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *DumpStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_DumpStatus.Merge(m, src) } func (m *DumpStatus) XXX_Size() int { return m.Size() } func (m *DumpStatus) XXX_DiscardUnknown() { xxx_messageInfo_DumpStatus.DiscardUnknown(m) } var xxx_messageInfo_DumpStatus proto.InternalMessageInfo func (m *DumpStatus) GetTotalTables() int64 { if m != nil { return m.TotalTables } return 0 } func (m *DumpStatus) GetCompletedTables() float64 { if m != nil { return m.CompletedTables } return 0 } func (m *DumpStatus) GetFinishedBytes() float64 { if m != nil { return m.FinishedBytes } return 0 } func (m *DumpStatus) GetFinishedRows() float64 { if m != nil { return m.FinishedRows } return 0 } func (m *DumpStatus) GetEstimateTotalRows() float64 { if m != nil { return m.EstimateTotalRows } return 0 } func (m *DumpStatus) GetBps() int64 { if m != nil { return m.Bps } return 0 } func (m *DumpStatus) GetProgress() string { if m != nil { return m.Progress } return "" } // LoadStatus represents status for load unit type LoadStatus struct { FinishedBytes int64 `protobuf:"varint,1,opt,name=finishedBytes,proto3" json:"finishedBytes,omitempty"` TotalBytes int64 `protobuf:"varint,2,opt,name=totalBytes,proto3" json:"totalBytes,omitempty"` Progress string `protobuf:"bytes,3,opt,name=progress,proto3" json:"progress,omitempty"` MetaBinlog string 
`protobuf:"bytes,4,opt,name=metaBinlog,proto3" json:"metaBinlog,omitempty"` MetaBinlogGTID string `protobuf:"bytes,5,opt,name=metaBinlogGTID,proto3" json:"metaBinlogGTID,omitempty"` Bps int64 `protobuf:"varint,6,opt,name=bps,proto3" json:"bps,omitempty"` } func (m *LoadStatus) Reset() { *m = LoadStatus{} } func (m *LoadStatus) String() string { return proto.CompactTextString(m) } func (*LoadStatus) ProtoMessage() {} func (*LoadStatus) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{5} } func (m *LoadStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LoadStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_LoadStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *LoadStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_LoadStatus.Merge(m, src) } func (m *LoadStatus) XXX_Size() int { return m.Size() } func (m *LoadStatus) XXX_DiscardUnknown() { xxx_messageInfo_LoadStatus.DiscardUnknown(m) } var xxx_messageInfo_LoadStatus proto.InternalMessageInfo func (m *LoadStatus) GetFinishedBytes() int64 { if m != nil { return m.FinishedBytes } return 0 } func (m *LoadStatus) GetTotalBytes() int64 { if m != nil { return m.TotalBytes } return 0 } func (m *LoadStatus) GetProgress() string { if m != nil { return m.Progress } return "" } func (m *LoadStatus) GetMetaBinlog() string { if m != nil { return m.MetaBinlog } return "" } func (m *LoadStatus) GetMetaBinlogGTID() string { if m != nil { return m.MetaBinlogGTID } return "" } func (m *LoadStatus) GetBps() int64 { if m != nil { return m.Bps } return 0 } // ShardingGroup represents a DDL sharding group, this is used by SyncStatus, and is differ from ShardingGroup in syncer pkg // target: target table name // DDL: in syncing DDL // firstPos: first DDL binlog pos for this group // synced: synced source tables // 
unsynced: unsynced source tables type ShardingGroup struct { Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` DDLs []string `protobuf:"bytes,2,rep,name=DDLs,proto3" json:"DDLs,omitempty"` FirstLocation string `protobuf:"bytes,3,opt,name=firstLocation,proto3" json:"firstLocation,omitempty"` Synced []string `protobuf:"bytes,4,rep,name=synced,proto3" json:"synced,omitempty"` Unsynced []string `protobuf:"bytes,5,rep,name=unsynced,proto3" json:"unsynced,omitempty"` } func (m *ShardingGroup) Reset() { *m = ShardingGroup{} } func (m *ShardingGroup) String() string { return proto.CompactTextString(m) } func (*ShardingGroup) ProtoMessage() {} func (*ShardingGroup) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{6} } func (m *ShardingGroup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ShardingGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ShardingGroup.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ShardingGroup) XXX_Merge(src proto.Message) { xxx_messageInfo_ShardingGroup.Merge(m, src) } func (m *ShardingGroup) XXX_Size() int { return m.Size() } func (m *ShardingGroup) XXX_DiscardUnknown() { xxx_messageInfo_ShardingGroup.DiscardUnknown(m) } var xxx_messageInfo_ShardingGroup proto.InternalMessageInfo func (m *ShardingGroup) GetTarget() string { if m != nil { return m.Target } return "" } func (m *ShardingGroup) GetDDLs() []string { if m != nil { return m.DDLs } return nil } func (m *ShardingGroup) GetFirstLocation() string { if m != nil { return m.FirstLocation } return "" } func (m *ShardingGroup) GetSynced() []string { if m != nil { return m.Synced } return nil } func (m *ShardingGroup) GetUnsynced() []string { if m != nil { return m.Unsynced } return nil } // SyncStatus represents status for sync unit type SyncStatus struct { 
// totalEvents/totalTps/recentTps has been deprecated now TotalEvents int64 `protobuf:"varint,1,opt,name=totalEvents,proto3" json:"totalEvents,omitempty"` TotalTps int64 `protobuf:"varint,2,opt,name=totalTps,proto3" json:"totalTps,omitempty"` RecentTps int64 `protobuf:"varint,3,opt,name=recentTps,proto3" json:"recentTps,omitempty"` MasterBinlog string `protobuf:"bytes,4,opt,name=masterBinlog,proto3" json:"masterBinlog,omitempty"` MasterBinlogGtid string `protobuf:"bytes,5,opt,name=masterBinlogGtid,proto3" json:"masterBinlogGtid,omitempty"` SyncerBinlog string `protobuf:"bytes,6,opt,name=syncerBinlog,proto3" json:"syncerBinlog,omitempty"` SyncerBinlogGtid string `protobuf:"bytes,7,opt,name=syncerBinlogGtid,proto3" json:"syncerBinlogGtid,omitempty"` BlockingDDLs []string `protobuf:"bytes,8,rep,name=blockingDDLs,proto3" json:"blockingDDLs,omitempty"` UnresolvedGroups []*ShardingGroup `protobuf:"bytes,9,rep,name=unresolvedGroups,proto3" json:"unresolvedGroups,omitempty"` Synced bool `protobuf:"varint,10,opt,name=synced,proto3" json:"synced,omitempty"` BinlogType string `protobuf:"bytes,11,opt,name=binlogType,proto3" json:"binlogType,omitempty"` SecondsBehindMaster int64 `protobuf:"varint,12,opt,name=secondsBehindMaster,proto3" json:"secondsBehindMaster,omitempty"` BlockDDLOwner string `protobuf:"bytes,13,opt,name=blockDDLOwner,proto3" json:"blockDDLOwner,omitempty"` ConflictMsg string `protobuf:"bytes,14,opt,name=conflictMsg,proto3" json:"conflictMsg,omitempty"` TotalRows int64 `protobuf:"varint,15,opt,name=totalRows,proto3" json:"totalRows,omitempty"` TotalRps int64 `protobuf:"varint,16,opt,name=totalRps,proto3" json:"totalRps,omitempty"` RecentRps int64 `protobuf:"varint,17,opt,name=recentRps,proto3" json:"recentRps,omitempty"` // meter TCP io to downstream of the subtask IoTotalBytes uint64 `protobuf:"varint,18,opt,name=ioTotalBytes,proto3" json:"ioTotalBytes,omitempty"` // meter TCP io from upstream of the subtask DumpIOTotalBytes uint64 
`protobuf:"varint,19,opt,name=dumpIOTotalBytes,proto3" json:"dumpIOTotalBytes,omitempty"` } func (m *SyncStatus) Reset() { *m = SyncStatus{} } func (m *SyncStatus) String() string { return proto.CompactTextString(m) } func (*SyncStatus) ProtoMessage() {} func (*SyncStatus) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{7} } func (m *SyncStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SyncStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SyncStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SyncStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_SyncStatus.Merge(m, src) } func (m *SyncStatus) XXX_Size() int { return m.Size() } func (m *SyncStatus) XXX_DiscardUnknown() { xxx_messageInfo_SyncStatus.DiscardUnknown(m) } var xxx_messageInfo_SyncStatus proto.InternalMessageInfo func (m *SyncStatus) GetTotalEvents() int64 { if m != nil { return m.TotalEvents } return 0 } func (m *SyncStatus) GetTotalTps() int64 { if m != nil { return m.TotalTps } return 0 } func (m *SyncStatus) GetRecentTps() int64 { if m != nil { return m.RecentTps } return 0 } func (m *SyncStatus) GetMasterBinlog() string { if m != nil { return m.MasterBinlog } return "" } func (m *SyncStatus) GetMasterBinlogGtid() string { if m != nil { return m.MasterBinlogGtid } return "" } func (m *SyncStatus) GetSyncerBinlog() string { if m != nil { return m.SyncerBinlog } return "" } func (m *SyncStatus) GetSyncerBinlogGtid() string { if m != nil { return m.SyncerBinlogGtid } return "" } func (m *SyncStatus) GetBlockingDDLs() []string { if m != nil { return m.BlockingDDLs } return nil } func (m *SyncStatus) GetUnresolvedGroups() []*ShardingGroup { if m != nil { return m.UnresolvedGroups } return nil } func (m *SyncStatus) GetSynced() bool { if m != nil { return m.Synced } return false } 
func (m *SyncStatus) GetBinlogType() string { if m != nil { return m.BinlogType } return "" } func (m *SyncStatus) GetSecondsBehindMaster() int64 { if m != nil { return m.SecondsBehindMaster } return 0 } func (m *SyncStatus) GetBlockDDLOwner() string { if m != nil { return m.BlockDDLOwner } return "" } func (m *SyncStatus) GetConflictMsg() string { if m != nil { return m.ConflictMsg } return "" } func (m *SyncStatus) GetTotalRows() int64 { if m != nil { return m.TotalRows } return 0 } func (m *SyncStatus) GetTotalRps() int64 { if m != nil { return m.TotalRps } return 0 } func (m *SyncStatus) GetRecentRps() int64 { if m != nil { return m.RecentRps } return 0 } func (m *SyncStatus) GetIoTotalBytes() uint64 { if m != nil { return m.IoTotalBytes } return 0 } func (m *SyncStatus) GetDumpIOTotalBytes() uint64 { if m != nil { return m.DumpIOTotalBytes } return 0 } // SourceStatus represents status for source runing on dm-worker type SourceStatus struct { Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` Worker string `protobuf:"bytes,2,opt,name=worker,proto3" json:"worker,omitempty"` Result *ProcessResult `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` RelayStatus *RelayStatus `protobuf:"bytes,4,opt,name=relayStatus,proto3" json:"relayStatus,omitempty"` } func (m *SourceStatus) Reset() { *m = SourceStatus{} } func (m *SourceStatus) String() string { return proto.CompactTextString(m) } func (*SourceStatus) ProtoMessage() {} func (*SourceStatus) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{8} } func (m *SourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SourceStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SourceStatus) XXX_Merge(src 
proto.Message) { xxx_messageInfo_SourceStatus.Merge(m, src) } func (m *SourceStatus) XXX_Size() int { return m.Size() } func (m *SourceStatus) XXX_DiscardUnknown() { xxx_messageInfo_SourceStatus.DiscardUnknown(m) } var xxx_messageInfo_SourceStatus proto.InternalMessageInfo func (m *SourceStatus) GetSource() string { if m != nil { return m.Source } return "" } func (m *SourceStatus) GetWorker() string { if m != nil { return m.Worker } return "" } func (m *SourceStatus) GetResult() *ProcessResult { if m != nil { return m.Result } return nil } func (m *SourceStatus) GetRelayStatus() *RelayStatus { if m != nil { return m.RelayStatus } return nil } // RelayStatus represents status for relay unit. type RelayStatus struct { MasterBinlog string `protobuf:"bytes,1,opt,name=masterBinlog,proto3" json:"masterBinlog,omitempty"` MasterBinlogGtid string `protobuf:"bytes,2,opt,name=masterBinlogGtid,proto3" json:"masterBinlogGtid,omitempty"` RelaySubDir string `protobuf:"bytes,3,opt,name=relaySubDir,proto3" json:"relaySubDir,omitempty"` RelayBinlog string `protobuf:"bytes,4,opt,name=relayBinlog,proto3" json:"relayBinlog,omitempty"` RelayBinlogGtid string `protobuf:"bytes,5,opt,name=relayBinlogGtid,proto3" json:"relayBinlogGtid,omitempty"` RelayCatchUpMaster bool `protobuf:"varint,6,opt,name=relayCatchUpMaster,proto3" json:"relayCatchUpMaster,omitempty"` Stage Stage `protobuf:"varint,7,opt,name=stage,proto3,enum=pb.Stage" json:"stage,omitempty"` Result *ProcessResult `protobuf:"bytes,8,opt,name=result,proto3" json:"result,omitempty"` } func (m *RelayStatus) Reset() { *m = RelayStatus{} } func (m *RelayStatus) String() string { return proto.CompactTextString(m) } func (*RelayStatus) ProtoMessage() {} func (*RelayStatus) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{9} } func (m *RelayStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RelayStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return 
xxx_messageInfo_RelayStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *RelayStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_RelayStatus.Merge(m, src) } func (m *RelayStatus) XXX_Size() int { return m.Size() } func (m *RelayStatus) XXX_DiscardUnknown() { xxx_messageInfo_RelayStatus.DiscardUnknown(m) } var xxx_messageInfo_RelayStatus proto.InternalMessageInfo func (m *RelayStatus) GetMasterBinlog() string { if m != nil { return m.MasterBinlog } return "" } func (m *RelayStatus) GetMasterBinlogGtid() string { if m != nil { return m.MasterBinlogGtid } return "" } func (m *RelayStatus) GetRelaySubDir() string { if m != nil { return m.RelaySubDir } return "" } func (m *RelayStatus) GetRelayBinlog() string { if m != nil { return m.RelayBinlog } return "" } func (m *RelayStatus) GetRelayBinlogGtid() string { if m != nil { return m.RelayBinlogGtid } return "" } func (m *RelayStatus) GetRelayCatchUpMaster() bool { if m != nil { return m.RelayCatchUpMaster } return false } func (m *RelayStatus) GetStage() Stage { if m != nil { return m.Stage } return Stage_InvalidStage } func (m *RelayStatus) GetResult() *ProcessResult { if m != nil { return m.Result } return nil } // SubTaskStatus represents status for a sub task // name: sub task'name, when starting a sub task the name should be unique // stage: sub task's current stage // unit: sub task's current dm unit's UnitType // result: current unit's process result, when the stage is Running, no result // unresolvedDDLLockID: un-resolved sharding DDL lock ID (ref DDLLockInfo) // // if needed, we can put this to SyncStatus // // status: current unit's statistics // // for Load, includes total bytes, progress, etc. // for Sync, includes TPS, binlog meta, etc. 
type SubTaskStatus struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Stage Stage `protobuf:"varint,2,opt,name=stage,proto3,enum=pb.Stage" json:"stage,omitempty"` Unit UnitType `protobuf:"varint,3,opt,name=unit,proto3,enum=pb.UnitType" json:"unit,omitempty"` Result *ProcessResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` UnresolvedDDLLockID string `protobuf:"bytes,5,opt,name=unresolvedDDLLockID,proto3" json:"unresolvedDDLLockID,omitempty"` // Types that are valid to be assigned to Status: // // *SubTaskStatus_Msg // *SubTaskStatus_Check // *SubTaskStatus_Dump // *SubTaskStatus_Load // *SubTaskStatus_Sync Status isSubTaskStatus_Status `protobuf_oneof:"status"` Validation *ValidationStatus `protobuf:"bytes,11,opt,name=validation,proto3" json:"validation,omitempty"` } func (m *SubTaskStatus) Reset() { *m = SubTaskStatus{} } func (m *SubTaskStatus) String() string { return proto.CompactTextString(m) } func (*SubTaskStatus) ProtoMessage() {} func (*SubTaskStatus) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{10} } func (m *SubTaskStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SubTaskStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SubTaskStatus.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SubTaskStatus) XXX_Merge(src proto.Message) { xxx_messageInfo_SubTaskStatus.Merge(m, src) } func (m *SubTaskStatus) XXX_Size() int { return m.Size() } func (m *SubTaskStatus) XXX_DiscardUnknown() { xxx_messageInfo_SubTaskStatus.DiscardUnknown(m) } var xxx_messageInfo_SubTaskStatus proto.InternalMessageInfo type isSubTaskStatus_Status interface { isSubTaskStatus_Status() MarshalTo([]byte) (int, error) Size() int } type SubTaskStatus_Msg struct { Msg string `protobuf:"bytes,6,opt,name=msg,proto3,oneof" 
json:"msg,omitempty"` } type SubTaskStatus_Check struct { Check *CheckStatus `protobuf:"bytes,7,opt,name=check,proto3,oneof" json:"check,omitempty"` } type SubTaskStatus_Dump struct { Dump *DumpStatus `protobuf:"bytes,8,opt,name=dump,proto3,oneof" json:"dump,omitempty"` } type SubTaskStatus_Load struct { Load *LoadStatus `protobuf:"bytes,9,opt,name=load,proto3,oneof" json:"load,omitempty"` } type SubTaskStatus_Sync struct { Sync *SyncStatus `protobuf:"bytes,10,opt,name=sync,proto3,oneof" json:"sync,omitempty"` } func (*SubTaskStatus_Msg) isSubTaskStatus_Status() {} func (*SubTaskStatus_Check) isSubTaskStatus_Status() {} func (*SubTaskStatus_Dump) isSubTaskStatus_Status() {} func (*SubTaskStatus_Load) isSubTaskStatus_Status() {} func (*SubTaskStatus_Sync) isSubTaskStatus_Status() {} func (m *SubTaskStatus) GetStatus() isSubTaskStatus_Status { if m != nil { return m.Status } return nil } func (m *SubTaskStatus) GetName() string { if m != nil { return m.Name } return "" } func (m *SubTaskStatus) GetStage() Stage { if m != nil { return m.Stage } return Stage_InvalidStage } func (m *SubTaskStatus) GetUnit() UnitType { if m != nil { return m.Unit } return UnitType_InvalidUnit } func (m *SubTaskStatus) GetResult() *ProcessResult { if m != nil { return m.Result } return nil } func (m *SubTaskStatus) GetUnresolvedDDLLockID() string { if m != nil { return m.UnresolvedDDLLockID } return "" } func (m *SubTaskStatus) GetMsg() string { if x, ok := m.GetStatus().(*SubTaskStatus_Msg); ok { return x.Msg } return "" } func (m *SubTaskStatus) GetCheck() *CheckStatus { if x, ok := m.GetStatus().(*SubTaskStatus_Check); ok { return x.Check } return nil } func (m *SubTaskStatus) GetDump() *DumpStatus { if x, ok := m.GetStatus().(*SubTaskStatus_Dump); ok { return x.Dump } return nil } func (m *SubTaskStatus) GetLoad() *LoadStatus { if x, ok := m.GetStatus().(*SubTaskStatus_Load); ok { return x.Load } return nil } func (m *SubTaskStatus) GetSync() *SyncStatus { if x, ok := 
m.GetStatus().(*SubTaskStatus_Sync); ok { return x.Sync } return nil } func (m *SubTaskStatus) GetValidation() *ValidationStatus { if m != nil { return m.Validation } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*SubTaskStatus) XXX_OneofWrappers() []interface{} { return []interface{}{ (*SubTaskStatus_Msg)(nil), (*SubTaskStatus_Check)(nil), (*SubTaskStatus_Dump)(nil), (*SubTaskStatus_Load)(nil), (*SubTaskStatus_Sync)(nil), } } // SubTaskStatusList used for internal jsonpb marshal type SubTaskStatusList struct { Status []*SubTaskStatus `protobuf:"bytes,1,rep,name=status,proto3" json:"status,omitempty"` } func (m *SubTaskStatusList) Reset() { *m = SubTaskStatusList{} } func (m *SubTaskStatusList) String() string { return proto.CompactTextString(m) } func (*SubTaskStatusList) ProtoMessage() {} func (*SubTaskStatusList) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{11} } func (m *SubTaskStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SubTaskStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SubTaskStatusList.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SubTaskStatusList) XXX_Merge(src proto.Message) { xxx_messageInfo_SubTaskStatusList.Merge(m, src) } func (m *SubTaskStatusList) XXX_Size() int { return m.Size() } func (m *SubTaskStatusList) XXX_DiscardUnknown() { xxx_messageInfo_SubTaskStatusList.DiscardUnknown(m) } var xxx_messageInfo_SubTaskStatusList proto.InternalMessageInfo func (m *SubTaskStatusList) GetStatus() []*SubTaskStatus { if m != nil { return m.Status } return nil } // CheckError represents error for check unit // adds fields later type CheckError struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *CheckError) Reset() { *m = CheckError{} } func (m 
*CheckError) String() string { return proto.CompactTextString(m) } func (*CheckError) ProtoMessage() {} func (*CheckError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{12} } func (m *CheckError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *CheckError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_CheckError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *CheckError) XXX_Merge(src proto.Message) { xxx_messageInfo_CheckError.Merge(m, src) } func (m *CheckError) XXX_Size() int { return m.Size() } func (m *CheckError) XXX_DiscardUnknown() { xxx_messageInfo_CheckError.DiscardUnknown(m) } var xxx_messageInfo_CheckError proto.InternalMessageInfo func (m *CheckError) GetMsg() string { if m != nil { return m.Msg } return "" } // DumpError represents error for dump unit // add fields later type DumpError struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *DumpError) Reset() { *m = DumpError{} } func (m *DumpError) String() string { return proto.CompactTextString(m) } func (*DumpError) ProtoMessage() {} func (*DumpError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{13} } func (m *DumpError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *DumpError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_DumpError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *DumpError) XXX_Merge(src proto.Message) { xxx_messageInfo_DumpError.Merge(m, src) } func (m *DumpError) XXX_Size() int { return m.Size() } func (m *DumpError) XXX_DiscardUnknown() { xxx_messageInfo_DumpError.DiscardUnknown(m) } var xxx_messageInfo_DumpError 
proto.InternalMessageInfo func (m *DumpError) GetMsg() string { if m != nil { return m.Msg } return "" } // LoadError represents error for load unit type LoadError struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *LoadError) Reset() { *m = LoadError{} } func (m *LoadError) String() string { return proto.CompactTextString(m) } func (*LoadError) ProtoMessage() {} func (*LoadError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{14} } func (m *LoadError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LoadError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_LoadError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *LoadError) XXX_Merge(src proto.Message) { xxx_messageInfo_LoadError.Merge(m, src) } func (m *LoadError) XXX_Size() int { return m.Size() } func (m *LoadError) XXX_DiscardUnknown() { xxx_messageInfo_LoadError.DiscardUnknown(m) } var xxx_messageInfo_LoadError proto.InternalMessageInfo func (m *LoadError) GetMsg() string { if m != nil { return m.Msg } return "" } // SyncSQLError represents a sql error in sync unit type SyncSQLError struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` FailedBinlogPosition string `protobuf:"bytes,2,opt,name=failedBinlogPosition,proto3" json:"failedBinlogPosition,omitempty"` ErrorSQL string `protobuf:"bytes,3,opt,name=errorSQL,proto3" json:"errorSQL,omitempty"` } func (m *SyncSQLError) Reset() { *m = SyncSQLError{} } func (m *SyncSQLError) String() string { return proto.CompactTextString(m) } func (*SyncSQLError) ProtoMessage() {} func (*SyncSQLError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{15} } func (m *SyncSQLError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SyncSQLError) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SyncSQLError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SyncSQLError) XXX_Merge(src proto.Message) { xxx_messageInfo_SyncSQLError.Merge(m, src) } func (m *SyncSQLError) XXX_Size() int { return m.Size() } func (m *SyncSQLError) XXX_DiscardUnknown() { xxx_messageInfo_SyncSQLError.DiscardUnknown(m) } var xxx_messageInfo_SyncSQLError proto.InternalMessageInfo func (m *SyncSQLError) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *SyncSQLError) GetFailedBinlogPosition() string { if m != nil { return m.FailedBinlogPosition } return "" } func (m *SyncSQLError) GetErrorSQL() string { if m != nil { return m.ErrorSQL } return "" } // SyncError represents error list for sync unit type SyncError struct { Errors []*SyncSQLError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` } func (m *SyncError) Reset() { *m = SyncError{} } func (m *SyncError) String() string { return proto.CompactTextString(m) } func (*SyncError) ProtoMessage() {} func (*SyncError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{16} } func (m *SyncError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SyncError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SyncError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SyncError) XXX_Merge(src proto.Message) { xxx_messageInfo_SyncError.Merge(m, src) } func (m *SyncError) XXX_Size() int { return m.Size() } func (m *SyncError) XXX_DiscardUnknown() { xxx_messageInfo_SyncError.DiscardUnknown(m) } var xxx_messageInfo_SyncError proto.InternalMessageInfo func (m *SyncError) GetErrors() []*SyncSQLError { if m != nil { return m.Errors } return 
nil } // SourceError represents error for start/stop source on dm-worker type SourceError struct { Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` Worker string `protobuf:"bytes,2,opt,name=worker,proto3" json:"worker,omitempty"` SourceError string `protobuf:"bytes,3,opt,name=SourceError,proto3" json:"SourceError,omitempty"` RelayError *RelayError `protobuf:"bytes,4,opt,name=RelayError,proto3" json:"RelayError,omitempty"` } func (m *SourceError) Reset() { *m = SourceError{} } func (m *SourceError) String() string { return proto.CompactTextString(m) } func (*SourceError) ProtoMessage() {} func (*SourceError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{17} } func (m *SourceError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SourceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SourceError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SourceError) XXX_Merge(src proto.Message) { xxx_messageInfo_SourceError.Merge(m, src) } func (m *SourceError) XXX_Size() int { return m.Size() } func (m *SourceError) XXX_DiscardUnknown() { xxx_messageInfo_SourceError.DiscardUnknown(m) } var xxx_messageInfo_SourceError proto.InternalMessageInfo func (m *SourceError) GetSource() string { if m != nil { return m.Source } return "" } func (m *SourceError) GetWorker() string { if m != nil { return m.Worker } return "" } func (m *SourceError) GetSourceError() string { if m != nil { return m.SourceError } return "" } func (m *SourceError) GetRelayError() *RelayError { if m != nil { return m.RelayError } return nil } // RelayError represents error for relay unit. 
type RelayError struct { Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *RelayError) Reset() { *m = RelayError{} } func (m *RelayError) String() string { return proto.CompactTextString(m) } func (*RelayError) ProtoMessage() {} func (*RelayError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{18} } func (m *RelayError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *RelayError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_RelayError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *RelayError) XXX_Merge(src proto.Message) { xxx_messageInfo_RelayError.Merge(m, src) } func (m *RelayError) XXX_Size() int { return m.Size() } func (m *RelayError) XXX_DiscardUnknown() { xxx_messageInfo_RelayError.DiscardUnknown(m) } var xxx_messageInfo_RelayError proto.InternalMessageInfo func (m *RelayError) GetMsg() string { if m != nil { return m.Msg } return "" } // SubTaskError represents error for a sub task during running // name: sub task'name, when starting a sub task the name should be unique // stage: sub task's current stage // unit: sub task's current dm unit's UnitType // error: current unit's error information // // for Sync, includes failed sql, failed sql pos in binlog, etc. 
type SubTaskError struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Stage Stage `protobuf:"varint,2,opt,name=stage,proto3,enum=pb.Stage" json:"stage,omitempty"` Unit UnitType `protobuf:"varint,3,opt,name=unit,proto3,enum=pb.UnitType" json:"unit,omitempty"` // Types that are valid to be assigned to Error: // // *SubTaskError_Msg // *SubTaskError_Check // *SubTaskError_Dump // *SubTaskError_Load // *SubTaskError_Sync Error isSubTaskError_Error `protobuf_oneof:"error"` } func (m *SubTaskError) Reset() { *m = SubTaskError{} } func (m *SubTaskError) String() string { return proto.CompactTextString(m) } func (*SubTaskError) ProtoMessage() {} func (*SubTaskError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{19} } func (m *SubTaskError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SubTaskError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SubTaskError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SubTaskError) XXX_Merge(src proto.Message) { xxx_messageInfo_SubTaskError.Merge(m, src) } func (m *SubTaskError) XXX_Size() int { return m.Size() } func (m *SubTaskError) XXX_DiscardUnknown() { xxx_messageInfo_SubTaskError.DiscardUnknown(m) } var xxx_messageInfo_SubTaskError proto.InternalMessageInfo type isSubTaskError_Error interface { isSubTaskError_Error() MarshalTo([]byte) (int, error) Size() int } type SubTaskError_Msg struct { Msg string `protobuf:"bytes,4,opt,name=msg,proto3,oneof" json:"msg,omitempty"` } type SubTaskError_Check struct { Check *CheckError `protobuf:"bytes,5,opt,name=check,proto3,oneof" json:"check,omitempty"` } type SubTaskError_Dump struct { Dump *DumpError `protobuf:"bytes,6,opt,name=dump,proto3,oneof" json:"dump,omitempty"` } type SubTaskError_Load struct { Load *LoadError 
`protobuf:"bytes,7,opt,name=load,proto3,oneof" json:"load,omitempty"` } type SubTaskError_Sync struct { Sync *SyncError `protobuf:"bytes,8,opt,name=sync,proto3,oneof" json:"sync,omitempty"` } func (*SubTaskError_Msg) isSubTaskError_Error() {} func (*SubTaskError_Check) isSubTaskError_Error() {} func (*SubTaskError_Dump) isSubTaskError_Error() {} func (*SubTaskError_Load) isSubTaskError_Error() {} func (*SubTaskError_Sync) isSubTaskError_Error() {} func (m *SubTaskError) GetError() isSubTaskError_Error { if m != nil { return m.Error } return nil } func (m *SubTaskError) GetName() string { if m != nil { return m.Name } return "" } func (m *SubTaskError) GetStage() Stage { if m != nil { return m.Stage } return Stage_InvalidStage } func (m *SubTaskError) GetUnit() UnitType { if m != nil { return m.Unit } return UnitType_InvalidUnit } func (m *SubTaskError) GetMsg() string { if x, ok := m.GetError().(*SubTaskError_Msg); ok { return x.Msg } return "" } func (m *SubTaskError) GetCheck() *CheckError { if x, ok := m.GetError().(*SubTaskError_Check); ok { return x.Check } return nil } func (m *SubTaskError) GetDump() *DumpError { if x, ok := m.GetError().(*SubTaskError_Dump); ok { return x.Dump } return nil } func (m *SubTaskError) GetLoad() *LoadError { if x, ok := m.GetError().(*SubTaskError_Load); ok { return x.Load } return nil } func (m *SubTaskError) GetSync() *SyncError { if x, ok := m.GetError().(*SubTaskError_Sync); ok { return x.Sync } return nil } // XXX_OneofWrappers is for the internal use of the proto package. 
func (*SubTaskError) XXX_OneofWrappers() []interface{} { return []interface{}{ (*SubTaskError_Msg)(nil), (*SubTaskError_Check)(nil), (*SubTaskError_Dump)(nil), (*SubTaskError_Load)(nil), (*SubTaskError_Sync)(nil), } } // SubTaskErrorList used for internal jsonpb marshal type SubTaskErrorList struct { Error []*SubTaskError `protobuf:"bytes,1,rep,name=error,proto3" json:"error,omitempty"` } func (m *SubTaskErrorList) Reset() { *m = SubTaskErrorList{} } func (m *SubTaskErrorList) String() string { return proto.CompactTextString(m) } func (*SubTaskErrorList) ProtoMessage() {} func (*SubTaskErrorList) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{20} } func (m *SubTaskErrorList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *SubTaskErrorList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_SubTaskErrorList.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *SubTaskErrorList) XXX_Merge(src proto.Message) { xxx_messageInfo_SubTaskErrorList.Merge(m, src) } func (m *SubTaskErrorList) XXX_Size() int { return m.Size() } func (m *SubTaskErrorList) XXX_DiscardUnknown() { xxx_messageInfo_SubTaskErrorList.DiscardUnknown(m) } var xxx_messageInfo_SubTaskErrorList proto.InternalMessageInfo func (m *SubTaskErrorList) GetError() []*SubTaskError { if m != nil { return m.Error } return nil } // ProcessResult represents results produced by a dm unit // isCanceled: indicates whether the process is canceled from external // // when Stop or Pause is requested from external, isCanceled will be true // // errors: includes all (potential) errors occured when processing type ProcessResult struct { IsCanceled bool `protobuf:"varint,1,opt,name=isCanceled,proto3" json:"isCanceled,omitempty"` Errors []*ProcessError `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` Detail []byte 
`protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` } func (m *ProcessResult) Reset() { *m = ProcessResult{} } func (m *ProcessResult) String() string { return proto.CompactTextString(m) } func (*ProcessResult) ProtoMessage() {} func (*ProcessResult) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{21} } func (m *ProcessResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ProcessResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ProcessResult.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ProcessResult) XXX_Merge(src proto.Message) { xxx_messageInfo_ProcessResult.Merge(m, src) } func (m *ProcessResult) XXX_Size() int { return m.Size() } func (m *ProcessResult) XXX_DiscardUnknown() { xxx_messageInfo_ProcessResult.DiscardUnknown(m) } var xxx_messageInfo_ProcessResult proto.InternalMessageInfo func (m *ProcessResult) GetIsCanceled() bool { if m != nil { return m.IsCanceled } return false } func (m *ProcessResult) GetErrors() []*ProcessError { if m != nil { return m.Errors } return nil } func (m *ProcessResult) GetDetail() []byte { if m != nil { return m.Detail } return nil } // ProcessError is same as terror used in dm // NOTE: currently stack trace is not supported, `Message` is the `terror.Error.getMsg` result // and `RawCause` is the `Error` result of error from `terror.Error.Cause()`. 
type ProcessError struct {
	ErrCode    int32  `protobuf:"varint,1,opt,name=ErrCode,proto3" json:"ErrCode,omitempty"`
	ErrClass   string `protobuf:"bytes,2,opt,name=ErrClass,proto3" json:"ErrClass,omitempty"`
	ErrScope   string `protobuf:"bytes,3,opt,name=ErrScope,proto3" json:"ErrScope,omitempty"`
	ErrLevel   string `protobuf:"bytes,4,opt,name=ErrLevel,proto3" json:"ErrLevel,omitempty"`
	Message    string `protobuf:"bytes,5,opt,name=Message,proto3" json:"Message,omitempty"`
	RawCause   string `protobuf:"bytes,6,opt,name=RawCause,proto3" json:"RawCause,omitempty"`
	Workaround string `protobuf:"bytes,7,opt,name=Workaround,proto3" json:"Workaround,omitempty"`
}

// proto.Message boilerplate for ProcessError (generated code).
func (m *ProcessError) Reset()         { *m = ProcessError{} }
func (m *ProcessError) String() string { return proto.CompactTextString(m) }
func (*ProcessError) ProtoMessage()    {}
func (*ProcessError) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{22}
}
func (m *ProcessError) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ProcessError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ProcessError.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ProcessError) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProcessError.Merge(m, src)
}
func (m *ProcessError) XXX_Size() int {
	return m.Size()
}
func (m *ProcessError) XXX_DiscardUnknown() {
	xxx_messageInfo_ProcessError.DiscardUnknown(m)
}

var xxx_messageInfo_ProcessError proto.InternalMessageInfo

// Nil-receiver-safe field accessors for ProcessError.
func (m *ProcessError) GetErrCode() int32 {
	if m != nil {
		return m.ErrCode
	}
	return 0
}

func (m *ProcessError) GetErrClass() string {
	if m != nil {
		return m.ErrClass
	}
	return ""
}

func (m *ProcessError) GetErrScope() string {
	if m != nil {
		return m.ErrScope
	}
	return ""
}

func (m *ProcessError) GetErrLevel() string {
	if m != nil {
		return m.ErrLevel
	}
	return ""
}

func (m *ProcessError) GetMessage() string {
	if m != nil {
		return m.Message
	}
	return ""
}

func (m *ProcessError) GetRawCause() string {
	if m != nil {
		return m.RawCause
	}
	return ""
}

func (m *ProcessError) GetWorkaround() string {
	if m != nil {
		return m.Workaround
	}
	return ""
}

// PurgeRelayRequest represents a request to purge relay log files for this dm-worker
// inactive: whether purge inactive relay log files
// time: whether purge relay log files before this time, the number of seconds elapsed since January 1, 1970 UTC
// filename: whether purge relay log files before this filename
// subDir: specify relay sub directory for @filename
type PurgeRelayRequest struct {
	Inactive bool   `protobuf:"varint,1,opt,name=inactive,proto3" json:"inactive,omitempty"`
	Time     int64  `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"`
	Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"`
	SubDir   string `protobuf:"bytes,4,opt,name=subDir,proto3" json:"subDir,omitempty"`
}

// proto.Message boilerplate for PurgeRelayRequest (generated code).
func (m *PurgeRelayRequest) Reset()         { *m = PurgeRelayRequest{} }
func (m *PurgeRelayRequest) String() string { return proto.CompactTextString(m) }
func (*PurgeRelayRequest) ProtoMessage()    {}
func (*PurgeRelayRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{23}
}
func (m *PurgeRelayRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *PurgeRelayRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_PurgeRelayRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *PurgeRelayRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PurgeRelayRequest.Merge(m, src)
}
func (m *PurgeRelayRequest) XXX_Size() int {
	return m.Size()
}
func (m *PurgeRelayRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_PurgeRelayRequest.DiscardUnknown(m)
}

var xxx_messageInfo_PurgeRelayRequest proto.InternalMessageInfo

// Nil-receiver-safe field accessors for PurgeRelayRequest.
func (m *PurgeRelayRequest) GetInactive() bool {
	if m != nil {
		return m.Inactive
	}
	return false
}

func (m *PurgeRelayRequest) GetTime() int64 {
	if m != nil {
		return m.Time
	}
	return 0
}

func (m *PurgeRelayRequest) GetFilename() string {
	if m != nil {
		return m.Filename
	}
	return ""
}

func (m *PurgeRelayRequest) GetSubDir() string {
	if m != nil {
		return m.SubDir
	}
	return ""
}

// OperateWorkerSchemaRequest is the request message for a schema operation
// (see SchemaOp) against a task/source/database/table on a dm-worker.
type OperateWorkerSchemaRequest struct {
	Op         SchemaOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.SchemaOp" json:"op,omitempty"`
	Task       string   `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"`
	Source     string   `protobuf:"bytes,3,opt,name=source,proto3" json:"source,omitempty"`
	Database   string   `protobuf:"bytes,4,opt,name=database,proto3" json:"database,omitempty"`
	Table      string   `protobuf:"bytes,5,opt,name=table,proto3" json:"table,omitempty"`
	Schema     string   `protobuf:"bytes,6,opt,name=schema,proto3" json:"schema,omitempty"`
	Flush      bool     `protobuf:"varint,7,opt,name=flush,proto3" json:"flush,omitempty"`
	Sync       bool     `protobuf:"varint,8,opt,name=sync,proto3" json:"sync,omitempty"`
	FromSource bool     `protobuf:"varint,9,opt,name=fromSource,proto3" json:"fromSource,omitempty"`
	FromTarget bool     `protobuf:"varint,10,opt,name=fromTarget,proto3" json:"fromTarget,omitempty"`
}

// proto.Message boilerplate for OperateWorkerSchemaRequest (generated code).
func (m *OperateWorkerSchemaRequest) Reset()         { *m = OperateWorkerSchemaRequest{} }
func (m *OperateWorkerSchemaRequest) String() string { return proto.CompactTextString(m) }
func (*OperateWorkerSchemaRequest) ProtoMessage()    {}
func (*OperateWorkerSchemaRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{24}
}
func (m *OperateWorkerSchemaRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *OperateWorkerSchemaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_OperateWorkerSchemaRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *OperateWorkerSchemaRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateWorkerSchemaRequest.Merge(m, src) } func (m *OperateWorkerSchemaRequest) XXX_Size() int { return m.Size() } func (m *OperateWorkerSchemaRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateWorkerSchemaRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateWorkerSchemaRequest proto.InternalMessageInfo func (m *OperateWorkerSchemaRequest) GetOp() SchemaOp { if m != nil { return m.Op } return SchemaOp_InvalidSchemaOp } func (m *OperateWorkerSchemaRequest) GetTask() string { if m != nil { return m.Task } return "" } func (m *OperateWorkerSchemaRequest) GetSource() string { if m != nil { return m.Source } return "" } func (m *OperateWorkerSchemaRequest) GetDatabase() string { if m != nil { return m.Database } return "" } func (m *OperateWorkerSchemaRequest) GetTable() string { if m != nil { return m.Table } return "" } func (m *OperateWorkerSchemaRequest) GetSchema() string { if m != nil { return m.Schema } return "" } func (m *OperateWorkerSchemaRequest) GetFlush() bool { if m != nil { return m.Flush } return false } func (m *OperateWorkerSchemaRequest) GetSync() bool { if m != nil { return m.Sync } return false } func (m *OperateWorkerSchemaRequest) GetFromSource() bool { if m != nil { return m.FromSource } return false } func (m *OperateWorkerSchemaRequest) GetFromTarget() bool { if m != nil { return m.FromTarget } return false } // copied `TaskMeta` from release-1.0 branch. 
type V1SubTaskMeta struct {
	Op    TaskOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.TaskOp" json:"op,omitempty"`
	Stage Stage  `protobuf:"varint,2,opt,name=stage,proto3,enum=pb.Stage" json:"stage,omitempty"`
	Name  string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
	Task  []byte `protobuf:"bytes,4,opt,name=task,proto3" json:"task,omitempty"`
}

// proto.Message boilerplate for V1SubTaskMeta (generated code).
func (m *V1SubTaskMeta) Reset()         { *m = V1SubTaskMeta{} }
func (m *V1SubTaskMeta) String() string { return proto.CompactTextString(m) }
func (*V1SubTaskMeta) ProtoMessage()    {}
func (*V1SubTaskMeta) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{25}
}
func (m *V1SubTaskMeta) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *V1SubTaskMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_V1SubTaskMeta.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *V1SubTaskMeta) XXX_Merge(src proto.Message) {
	xxx_messageInfo_V1SubTaskMeta.Merge(m, src)
}
func (m *V1SubTaskMeta) XXX_Size() int {
	return m.Size()
}
func (m *V1SubTaskMeta) XXX_DiscardUnknown() {
	xxx_messageInfo_V1SubTaskMeta.DiscardUnknown(m)
}

var xxx_messageInfo_V1SubTaskMeta proto.InternalMessageInfo

// Nil-receiver-safe field accessors for V1SubTaskMeta.
func (m *V1SubTaskMeta) GetOp() TaskOp {
	if m != nil {
		return m.Op
	}
	return TaskOp_InvalidOp
}

func (m *V1SubTaskMeta) GetStage() Stage {
	if m != nil {
		return m.Stage
	}
	return Stage_InvalidStage
}

func (m *V1SubTaskMeta) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

func (m *V1SubTaskMeta) GetTask() []byte {
	if m != nil {
		return m.Task
	}
	return nil
}

// OperateV1MetaRequest is the request message for operating on v1.0.x metadata
// (see V1MetaOp).
type OperateV1MetaRequest struct {
	Op V1MetaOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.V1MetaOp" json:"op,omitempty"`
}

// proto.Message boilerplate for OperateV1MetaRequest (generated code).
func (m *OperateV1MetaRequest) Reset()         { *m = OperateV1MetaRequest{} }
func (m *OperateV1MetaRequest) String() string { return proto.CompactTextString(m) }
func (*OperateV1MetaRequest) ProtoMessage()    {}
func (*OperateV1MetaRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{26}
}
func (m *OperateV1MetaRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *OperateV1MetaRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_OperateV1MetaRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *OperateV1MetaRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OperateV1MetaRequest.Merge(m, src)
}
func (m *OperateV1MetaRequest) XXX_Size() int {
	return m.Size()
}
func (m *OperateV1MetaRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_OperateV1MetaRequest.DiscardUnknown(m)
}

var xxx_messageInfo_OperateV1MetaRequest proto.InternalMessageInfo

// GetOp is the nil-receiver-safe accessor for the Op field.
func (m *OperateV1MetaRequest) GetOp() V1MetaOp {
	if m != nil {
		return m.Op
	}
	return V1MetaOp_InvalidV1MetaOp
}

// OperateV1MetaResponse is the response message for OperateV1MetaRequest;
// Meta maps sub task names to their v1.0.x metadata.
type OperateV1MetaResponse struct {
	Result bool                      `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"`
	Msg    string                    `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"`
	Meta   map[string]*V1SubTaskMeta `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

// proto.Message boilerplate for OperateV1MetaResponse (generated code).
func (m *OperateV1MetaResponse) Reset()         { *m = OperateV1MetaResponse{} }
func (m *OperateV1MetaResponse) String() string { return proto.CompactTextString(m) }
func (*OperateV1MetaResponse) ProtoMessage()    {}
func (*OperateV1MetaResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{27}
}
func (m *OperateV1MetaResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *OperateV1MetaResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_OperateV1MetaResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *OperateV1MetaResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_OperateV1MetaResponse.Merge(m, src)
}
func (m *OperateV1MetaResponse) XXX_Size() int {
	return m.Size()
}
func (m *OperateV1MetaResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_OperateV1MetaResponse.DiscardUnknown(m)
}

var xxx_messageInfo_OperateV1MetaResponse proto.InternalMessageInfo

// Nil-receiver-safe field accessors for OperateV1MetaResponse.
func (m *OperateV1MetaResponse) GetResult() bool {
	if m != nil {
		return m.Result
	}
	return false
}

func (m *OperateV1MetaResponse) GetMsg() string {
	if m != nil {
		return m.Msg
	}
	return ""
}

func (m *OperateV1MetaResponse) GetMeta() map[string]*V1SubTaskMeta {
	if m != nil {
		return m.Meta
	}
	return nil
}

// HandleWorkerErrorRequest is the request message for handling a worker error
// (see ErrorOp) at a given binlog position, optionally with replacement SQLs.
type HandleWorkerErrorRequest struct {
	Op        ErrorOp  `protobuf:"varint,1,opt,name=op,proto3,enum=pb.ErrorOp" json:"op,omitempty"`
	Task      string   `protobuf:"bytes,2,opt,name=task,proto3" json:"task,omitempty"`
	BinlogPos string   `protobuf:"bytes,3,opt,name=binlogPos,proto3" json:"binlogPos,omitempty"`
	Sqls      []string `protobuf:"bytes,4,rep,name=sqls,proto3" json:"sqls,omitempty"`
}

// proto.Message boilerplate for HandleWorkerErrorRequest (generated code).
func (m *HandleWorkerErrorRequest) Reset()         { *m = HandleWorkerErrorRequest{} }
func (m *HandleWorkerErrorRequest) String() string { return proto.CompactTextString(m) }
func (*HandleWorkerErrorRequest) ProtoMessage()    {}
func (*HandleWorkerErrorRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{28}
}
func (m *HandleWorkerErrorRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *HandleWorkerErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_HandleWorkerErrorRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *HandleWorkerErrorRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_HandleWorkerErrorRequest.Merge(m, src)
}
func (m *HandleWorkerErrorRequest) XXX_Size() int {
	return m.Size()
}
func (m *HandleWorkerErrorRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_HandleWorkerErrorRequest.DiscardUnknown(m)
}

var xxx_messageInfo_HandleWorkerErrorRequest proto.InternalMessageInfo

// Nil-receiver-safe field accessors for HandleWorkerErrorRequest.
func (m *HandleWorkerErrorRequest) GetOp() ErrorOp {
	if m != nil {
		return m.Op
	}
	return ErrorOp_InvalidErrorOp
}

func (m *HandleWorkerErrorRequest) GetTask() string {
	if m != nil {
		return m.Task
	}
	return ""
}

func (m *HandleWorkerErrorRequest) GetBinlogPos() string {
	if m != nil {
		return m.BinlogPos
	}
	return ""
}

func (m *HandleWorkerErrorRequest) GetSqls() []string {
	if m != nil {
		return m.Sqls
	}
	return nil
}

// GetWorkerCfgRequest is an empty request message for fetching a worker's
// configuration.
type GetWorkerCfgRequest struct {
}

// proto.Message boilerplate for GetWorkerCfgRequest (generated code).
func (m *GetWorkerCfgRequest) Reset()         { *m = GetWorkerCfgRequest{} }
func (m *GetWorkerCfgRequest) String() string { return proto.CompactTextString(m) }
func (*GetWorkerCfgRequest) ProtoMessage()    {}
func (*GetWorkerCfgRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{29}
}
func (m *GetWorkerCfgRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetWorkerCfgRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetWorkerCfgRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *GetWorkerCfgRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetWorkerCfgRequest.Merge(m, src)
}
func (m *GetWorkerCfgRequest) XXX_Size() int {
	return m.Size()
}
func (m *GetWorkerCfgRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetWorkerCfgRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetWorkerCfgRequest proto.InternalMessageInfo

// GetWorkerCfgResponse carries the worker configuration as a string.
type GetWorkerCfgResponse struct {
	Cfg string `protobuf:"bytes,1,opt,name=cfg,proto3" json:"cfg,omitempty"`
}

// proto.Message boilerplate for GetWorkerCfgResponse (generated code).
func (m *GetWorkerCfgResponse) Reset()         { *m = GetWorkerCfgResponse{} }
func (m *GetWorkerCfgResponse) String() string { return proto.CompactTextString(m) }
func (*GetWorkerCfgResponse) ProtoMessage()    {}
func (*GetWorkerCfgResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{30}
}
func (m *GetWorkerCfgResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetWorkerCfgResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetWorkerCfgResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *GetWorkerCfgResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetWorkerCfgResponse.Merge(m, src)
}
func (m *GetWorkerCfgResponse) XXX_Size() int {
	return m.Size()
}
func (m *GetWorkerCfgResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_GetWorkerCfgResponse.DiscardUnknown(m)
}

var xxx_messageInfo_GetWorkerCfgResponse proto.InternalMessageInfo

// GetCfg is the nil-receiver-safe accessor for the Cfg field.
func (m *GetWorkerCfgResponse) GetCfg() string {
	if m != nil {
		return m.Cfg
	}
	return ""
}

// CheckSubtasksCanUpdateRequest carries a sub task config (TOML text) to be
// checked for updatability.
type CheckSubtasksCanUpdateRequest struct {
	SubtaskCfgTomlString string `protobuf:"bytes,1,opt,name=subtaskCfgTomlString,proto3" json:"subtaskCfgTomlString,omitempty"`
}

// proto.Message boilerplate for CheckSubtasksCanUpdateRequest (generated code).
func (m *CheckSubtasksCanUpdateRequest) Reset()         { *m = CheckSubtasksCanUpdateRequest{} }
func (m *CheckSubtasksCanUpdateRequest) String() string { return proto.CompactTextString(m) }
func (*CheckSubtasksCanUpdateRequest) ProtoMessage()    {}
func (*CheckSubtasksCanUpdateRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{31}
}
func (m *CheckSubtasksCanUpdateRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CheckSubtasksCanUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CheckSubtasksCanUpdateRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *CheckSubtasksCanUpdateRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckSubtasksCanUpdateRequest.Merge(m, src)
}
func (m *CheckSubtasksCanUpdateRequest) XXX_Size() int {
	return m.Size()
}
func (m *CheckSubtasksCanUpdateRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckSubtasksCanUpdateRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CheckSubtasksCanUpdateRequest proto.InternalMessageInfo

// GetSubtaskCfgTomlString is the nil-receiver-safe accessor for the
// SubtaskCfgTomlString field.
func (m *CheckSubtasksCanUpdateRequest) GetSubtaskCfgTomlString() string {
	if m != nil {
		return m.SubtaskCfgTomlString
	}
	return ""
}

// CheckSubtasksCanUpdateResponse is the response for
// CheckSubtasksCanUpdateRequest.
type CheckSubtasksCanUpdateResponse struct {
	Success bool   `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
	Msg     string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"`
}

// proto.Message boilerplate for CheckSubtasksCanUpdateResponse (generated code).
func (m *CheckSubtasksCanUpdateResponse) Reset()         { *m = CheckSubtasksCanUpdateResponse{} }
func (m *CheckSubtasksCanUpdateResponse) String() string { return proto.CompactTextString(m) }
func (*CheckSubtasksCanUpdateResponse) ProtoMessage()    {}
func (*CheckSubtasksCanUpdateResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{32}
}
func (m *CheckSubtasksCanUpdateResponse) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CheckSubtasksCanUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CheckSubtasksCanUpdateResponse.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *CheckSubtasksCanUpdateResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckSubtasksCanUpdateResponse.Merge(m, src)
}
func (m *CheckSubtasksCanUpdateResponse) XXX_Size() int {
	return m.Size()
}
func (m *CheckSubtasksCanUpdateResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckSubtasksCanUpdateResponse.DiscardUnknown(m)
}

var xxx_messageInfo_CheckSubtasksCanUpdateResponse proto.InternalMessageInfo

// Nil-receiver-safe field accessors for CheckSubtasksCanUpdateResponse.
func (m *CheckSubtasksCanUpdateResponse) GetSuccess() bool {
	if m != nil {
		return m.Success
	}
	return false
}

func (m *CheckSubtasksCanUpdateResponse) GetMsg() string {
	if m != nil {
		return m.Msg
	}
	return ""
}

// GetValidationStatusRequest is the request message for querying validation
// status of a task, optionally filtered by Stage.
type GetValidationStatusRequest struct {
	TaskName     string `protobuf:"bytes,1,opt,name=taskName,proto3" json:"taskName,omitempty"`
	FilterStatus Stage  `protobuf:"varint,2,opt,name=filterStatus,proto3,enum=pb.Stage" json:"filterStatus,omitempty"`
}

// proto.Message boilerplate for GetValidationStatusRequest (generated code).
func (m *GetValidationStatusRequest) Reset()         { *m = GetValidationStatusRequest{} }
func (m *GetValidationStatusRequest) String() string { return proto.CompactTextString(m) }
func (*GetValidationStatusRequest) ProtoMessage()    {}
func (*GetValidationStatusRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{33}
}
func (m *GetValidationStatusRequest) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *GetValidationStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_GetValidationStatusRequest.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *GetValidationStatusRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetValidationStatusRequest.Merge(m, src)
}
func (m *GetValidationStatusRequest) XXX_Size() int {
	return m.Size()
}
func (m *GetValidationStatusRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetValidationStatusRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetValidationStatusRequest proto.InternalMessageInfo

// Nil-receiver-safe field accessors for GetValidationStatusRequest.
func (m *GetValidationStatusRequest) GetTaskName() string {
	if m != nil {
		return m.TaskName
	}
	return ""
}

func (m *GetValidationStatusRequest) GetFilterStatus() Stage {
	if m != nil {
		return m.FilterStatus
	}
	return Stage_InvalidStage
}

// ValidationStatus describes the state of one validator for a task/source
// pair, including binlog progress, row-status counters and cutover positions.
type ValidationStatus struct {
	Task                string         `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"`
	Source              string         `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
	Mode                string         `protobuf:"bytes,3,opt,name=mode,proto3" json:"mode,omitempty"`
	Stage               Stage          `protobuf:"varint,4,opt,name=stage,proto3,enum=pb.Stage" json:"stage,omitempty"`
	ValidatorBinlog     string         `protobuf:"bytes,5,opt,name=validatorBinlog,proto3" json:"validatorBinlog,omitempty"`
	ValidatorBinlogGtid string         `protobuf:"bytes,6,opt,name=validatorBinlogGtid,proto3" json:"validatorBinlogGtid,omitempty"`
	Result              *ProcessResult `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
	ProcessedRowsStatus string         `protobuf:"bytes,8,opt,name=processedRowsStatus,proto3" json:"processedRowsStatus,omitempty"`
	PendingRowsStatus   string         `protobuf:"bytes,9,opt,name=pendingRowsStatus,proto3" json:"pendingRowsStatus,omitempty"`
	ErrorRowsStatus     string         `protobuf:"bytes,10,opt,name=errorRowsStatus,proto3" json:"errorRowsStatus,omitempty"`
	CutoverBinlogPos    string         `protobuf:"bytes,11,opt,name=cutoverBinlogPos,proto3" json:"cutoverBinlogPos,omitempty"`
	CutoverBinlogGtid   string         `protobuf:"bytes,12,opt,name=cutoverBinlogGtid,proto3" json:"cutoverBinlogGtid,omitempty"`
}

// proto.Message boilerplate for ValidationStatus (generated code).
func (m *ValidationStatus) Reset()         { *m = ValidationStatus{} }
func (m *ValidationStatus) String() string { return proto.CompactTextString(m) }
func (*ValidationStatus) ProtoMessage()    {}
func (*ValidationStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{34}
}
func (m *ValidationStatus) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ValidationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ValidationStatus.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ValidationStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidationStatus.Merge(m, src)
}
func (m *ValidationStatus) XXX_Size() int {
	return m.Size()
}
func (m *ValidationStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidationStatus.DiscardUnknown(m)
}

var xxx_messageInfo_ValidationStatus proto.InternalMessageInfo

// Nil-receiver-safe field accessors for ValidationStatus.
func (m *ValidationStatus) GetTask() string {
	if m != nil {
		return m.Task
	}
	return ""
}

func (m *ValidationStatus) GetSource() string {
	if m != nil {
		return m.Source
	}
	return ""
}

func (m *ValidationStatus) GetMode() string {
	if m != nil {
		return m.Mode
	}
	return ""
}

func (m *ValidationStatus) GetStage() Stage {
	if m != nil {
		return m.Stage
	}
	return Stage_InvalidStage
}

func (m *ValidationStatus) GetValidatorBinlog() string {
	if m != nil {
		return m.ValidatorBinlog
	}
	return ""
}

func (m *ValidationStatus) GetValidatorBinlogGtid() string {
	if m != nil {
		return m.ValidatorBinlogGtid
	}
	return ""
}

func (m *ValidationStatus) GetResult() *ProcessResult {
	if m != nil {
		return m.Result
	}
	return nil
}

func (m *ValidationStatus) GetProcessedRowsStatus() string {
	if m != nil {
		return m.ProcessedRowsStatus
	}
	return ""
}

func (m *ValidationStatus) GetPendingRowsStatus() string {
	if m != nil {
		return m.PendingRowsStatus
	}
	return ""
}

func (m *ValidationStatus) GetErrorRowsStatus() string {
	if m != nil {
		return m.ErrorRowsStatus
	}
	return ""
}

func (m *ValidationStatus) GetCutoverBinlogPos() string {
	if m != nil {
		return m.CutoverBinlogPos
	}
	return ""
}

func (m *ValidationStatus) GetCutoverBinlogGtid() string {
	if m != nil {
		return m.CutoverBinlogGtid
	}
	return ""
}

// ValidationTableStatus describes per-table validation state for a source
// table and its destination table.
type ValidationTableStatus struct {
	Source   string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
	SrcTable string `protobuf:"bytes,2,opt,name=srcTable,proto3" json:"srcTable,omitempty"`
	DstTable string `protobuf:"bytes,3,opt,name=dstTable,proto3" json:"dstTable,omitempty"`
	Stage    Stage  `protobuf:"varint,4,opt,name=stage,proto3,enum=pb.Stage" json:"stage,omitempty"`
	Message  string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"`
}

// proto.Message boilerplate for ValidationTableStatus (generated code).
func (m *ValidationTableStatus) Reset()         { *m = ValidationTableStatus{} }
func (m *ValidationTableStatus) String() string { return proto.CompactTextString(m) }
func (*ValidationTableStatus) ProtoMessage()    {}
func (*ValidationTableStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_51a1b9e17fd67b10, []int{35}
}
func (m *ValidationTableStatus) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *ValidationTableStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_ValidationTableStatus.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *ValidationTableStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidationTableStatus.Merge(m, src)
}
func (m *ValidationTableStatus) XXX_Size() int {
	return m.Size()
}
func (m *ValidationTableStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidationTableStatus.DiscardUnknown(m)
}

var xxx_messageInfo_ValidationTableStatus proto.InternalMessageInfo

// Nil-receiver-safe field accessors for ValidationTableStatus.
func (m *ValidationTableStatus) GetSource() string {
	if m != nil {
		return m.Source
	}
	return ""
}

func (m *ValidationTableStatus) GetSrcTable() string {
	if m != nil {
		return m.SrcTable
	}
	return ""
}

func (m *ValidationTableStatus) GetDstTable() string {
	if m != nil {
		return m.DstTable
	}
	return ""
}

func (m *ValidationTableStatus) GetStage() Stage {
	if m != nil {
		return m.Stage
	}
	return Stage_InvalidStage
}

func (m *ValidationTableStatus) GetMessage() string {
	if m != nil {
		return m.Message
	}
	return ""
}

// GetValidationStatusResponse is the response for GetValidationStatusRequest,
// bundling validator statuses and per-table statuses.
type GetValidationStatusResponse struct {
	Result        bool                     `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"`
	Msg           string                   `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"`
	Validators    []*ValidationStatus      `protobuf:"bytes,3,rep,name=validators,proto3" json:"validators,omitempty"`
	TableStatuses []*ValidationTableStatus `protobuf:"bytes,4,rep,name=tableStatuses,proto3" json:"tableStatuses,omitempty"`
}

// proto.Message boilerplate for GetValidationStatusResponse (generated code).
func (m *GetValidationStatusResponse) Reset()         { *m = GetValidationStatusResponse{} }
func (m *GetValidationStatusResponse) String() string { return proto.CompactTextString(m) }
func (*GetValidationStatusResponse) ProtoMessage() {} func (*GetValidationStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{36} } func (m *GetValidationStatusResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetValidationStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetValidationStatusResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetValidationStatusResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetValidationStatusResponse.Merge(m, src) } func (m *GetValidationStatusResponse) XXX_Size() int { return m.Size() } func (m *GetValidationStatusResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetValidationStatusResponse.DiscardUnknown(m) } var xxx_messageInfo_GetValidationStatusResponse proto.InternalMessageInfo func (m *GetValidationStatusResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *GetValidationStatusResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *GetValidationStatusResponse) GetValidators() []*ValidationStatus { if m != nil { return m.Validators } return nil } func (m *GetValidationStatusResponse) GetTableStatuses() []*ValidationTableStatus { if m != nil { return m.TableStatuses } return nil } type GetValidationErrorRequest struct { ErrState ValidateErrorState `protobuf:"varint,1,opt,name=errState,proto3,enum=pb.ValidateErrorState" json:"errState,omitempty"` TaskName string `protobuf:"bytes,2,opt,name=taskName,proto3" json:"taskName,omitempty"` } func (m *GetValidationErrorRequest) Reset() { *m = GetValidationErrorRequest{} } func (m *GetValidationErrorRequest) String() string { return proto.CompactTextString(m) } func (*GetValidationErrorRequest) ProtoMessage() {} func (*GetValidationErrorRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor_51a1b9e17fd67b10, []int{37} } func (m *GetValidationErrorRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetValidationErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetValidationErrorRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetValidationErrorRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_GetValidationErrorRequest.Merge(m, src) } func (m *GetValidationErrorRequest) XXX_Size() int { return m.Size() } func (m *GetValidationErrorRequest) XXX_DiscardUnknown() { xxx_messageInfo_GetValidationErrorRequest.DiscardUnknown(m) } var xxx_messageInfo_GetValidationErrorRequest proto.InternalMessageInfo func (m *GetValidationErrorRequest) GetErrState() ValidateErrorState { if m != nil { return m.ErrState } return ValidateErrorState_InvalidErr } func (m *GetValidationErrorRequest) GetTaskName() string { if m != nil { return m.TaskName } return "" } type ValidationError struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` SrcTable string `protobuf:"bytes,3,opt,name=srcTable,proto3" json:"srcTable,omitempty"` SrcData string `protobuf:"bytes,4,opt,name=srcData,proto3" json:"srcData,omitempty"` DstTable string `protobuf:"bytes,5,opt,name=dstTable,proto3" json:"dstTable,omitempty"` DstData string `protobuf:"bytes,6,opt,name=dstData,proto3" json:"dstData,omitempty"` ErrorType string `protobuf:"bytes,7,opt,name=errorType,proto3" json:"errorType,omitempty"` Status ValidateErrorState `protobuf:"varint,8,opt,name=status,proto3,enum=pb.ValidateErrorState" json:"status,omitempty"` Time string `protobuf:"bytes,9,opt,name=time,proto3" json:"time,omitempty"` Message string `protobuf:"bytes,10,opt,name=message,proto3" json:"message,omitempty"` } func (m 
*ValidationError) Reset() { *m = ValidationError{} } func (m *ValidationError) String() string { return proto.CompactTextString(m) } func (*ValidationError) ProtoMessage() {} func (*ValidationError) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{38} } func (m *ValidationError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ValidationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ValidationError.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ValidationError) XXX_Merge(src proto.Message) { xxx_messageInfo_ValidationError.Merge(m, src) } func (m *ValidationError) XXX_Size() int { return m.Size() } func (m *ValidationError) XXX_DiscardUnknown() { xxx_messageInfo_ValidationError.DiscardUnknown(m) } var xxx_messageInfo_ValidationError proto.InternalMessageInfo func (m *ValidationError) GetId() string { if m != nil { return m.Id } return "" } func (m *ValidationError) GetSource() string { if m != nil { return m.Source } return "" } func (m *ValidationError) GetSrcTable() string { if m != nil { return m.SrcTable } return "" } func (m *ValidationError) GetSrcData() string { if m != nil { return m.SrcData } return "" } func (m *ValidationError) GetDstTable() string { if m != nil { return m.DstTable } return "" } func (m *ValidationError) GetDstData() string { if m != nil { return m.DstData } return "" } func (m *ValidationError) GetErrorType() string { if m != nil { return m.ErrorType } return "" } func (m *ValidationError) GetStatus() ValidateErrorState { if m != nil { return m.Status } return ValidateErrorState_InvalidErr } func (m *ValidationError) GetTime() string { if m != nil { return m.Time } return "" } func (m *ValidationError) GetMessage() string { if m != nil { return m.Message } return "" } type GetValidationErrorResponse struct { Result bool 
`protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` Error []*ValidationError `protobuf:"bytes,3,rep,name=error,proto3" json:"error,omitempty"` } func (m *GetValidationErrorResponse) Reset() { *m = GetValidationErrorResponse{} } func (m *GetValidationErrorResponse) String() string { return proto.CompactTextString(m) } func (*GetValidationErrorResponse) ProtoMessage() {} func (*GetValidationErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{39} } func (m *GetValidationErrorResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *GetValidationErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_GetValidationErrorResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *GetValidationErrorResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_GetValidationErrorResponse.Merge(m, src) } func (m *GetValidationErrorResponse) XXX_Size() int { return m.Size() } func (m *GetValidationErrorResponse) XXX_DiscardUnknown() { xxx_messageInfo_GetValidationErrorResponse.DiscardUnknown(m) } var xxx_messageInfo_GetValidationErrorResponse proto.InternalMessageInfo func (m *GetValidationErrorResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *GetValidationErrorResponse) GetMsg() string { if m != nil { return m.Msg } return "" } func (m *GetValidationErrorResponse) GetError() []*ValidationError { if m != nil { return m.Error } return nil } type OperateValidationErrorRequest struct { Op ValidationErrOp `protobuf:"varint,1,opt,name=op,proto3,enum=pb.ValidationErrOp" json:"op,omitempty"` IsAllError bool `protobuf:"varint,2,opt,name=isAllError,proto3" json:"isAllError,omitempty"` TaskName string `protobuf:"bytes,3,opt,name=taskName,proto3" 
json:"taskName,omitempty"` ErrId uint64 `protobuf:"varint,4,opt,name=errId,proto3" json:"errId,omitempty"` } func (m *OperateValidationErrorRequest) Reset() { *m = OperateValidationErrorRequest{} } func (m *OperateValidationErrorRequest) String() string { return proto.CompactTextString(m) } func (*OperateValidationErrorRequest) ProtoMessage() {} func (*OperateValidationErrorRequest) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{40} } func (m *OperateValidationErrorRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateValidationErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateValidationErrorRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateValidationErrorRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateValidationErrorRequest.Merge(m, src) } func (m *OperateValidationErrorRequest) XXX_Size() int { return m.Size() } func (m *OperateValidationErrorRequest) XXX_DiscardUnknown() { xxx_messageInfo_OperateValidationErrorRequest.DiscardUnknown(m) } var xxx_messageInfo_OperateValidationErrorRequest proto.InternalMessageInfo func (m *OperateValidationErrorRequest) GetOp() ValidationErrOp { if m != nil { return m.Op } return ValidationErrOp_InvalidErrOp } func (m *OperateValidationErrorRequest) GetIsAllError() bool { if m != nil { return m.IsAllError } return false } func (m *OperateValidationErrorRequest) GetTaskName() string { if m != nil { return m.TaskName } return "" } func (m *OperateValidationErrorRequest) GetErrId() uint64 { if m != nil { return m.ErrId } return 0 } type OperateValidationErrorResponse struct { Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg,omitempty"` } func (m *OperateValidationErrorResponse) 
Reset() { *m = OperateValidationErrorResponse{} } func (m *OperateValidationErrorResponse) String() string { return proto.CompactTextString(m) } func (*OperateValidationErrorResponse) ProtoMessage() {} func (*OperateValidationErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor_51a1b9e17fd67b10, []int{41} } func (m *OperateValidationErrorResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *OperateValidationErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_OperateValidationErrorResponse.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *OperateValidationErrorResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_OperateValidationErrorResponse.Merge(m, src) } func (m *OperateValidationErrorResponse) XXX_Size() int { return m.Size() } func (m *OperateValidationErrorResponse) XXX_DiscardUnknown() { xxx_messageInfo_OperateValidationErrorResponse.DiscardUnknown(m) } var xxx_messageInfo_OperateValidationErrorResponse proto.InternalMessageInfo func (m *OperateValidationErrorResponse) GetResult() bool { if m != nil { return m.Result } return false } func (m *OperateValidationErrorResponse) GetMsg() string { if m != nil { return m.Msg } return "" } type UpdateValidationWorkerRequest struct { TaskName string `protobuf:"bytes,1,opt,name=taskName,proto3" json:"taskName,omitempty"` BinlogPos string `protobuf:"bytes,2,opt,name=binlogPos,proto3" json:"binlogPos,omitempty"` BinlogGTID string `protobuf:"bytes,3,opt,name=binlogGTID,proto3" json:"binlogGTID,omitempty"` } func (m *UpdateValidationWorkerRequest) Reset() { *m = UpdateValidationWorkerRequest{} } func (m *UpdateValidationWorkerRequest) String() string { return proto.CompactTextString(m) } func (*UpdateValidationWorkerRequest) ProtoMessage() {} func (*UpdateValidationWorkerRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor_51a1b9e17fd67b10, []int{42} } func (m *UpdateValidationWorkerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *UpdateValidationWorkerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_UpdateValidationWorkerRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *UpdateValidationWorkerRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_UpdateValidationWorkerRequest.Merge(m, src) } func (m *UpdateValidationWorkerRequest) XXX_Size() int { return m.Size() } func (m *UpdateValidationWorkerRequest) XXX_DiscardUnknown() { xxx_messageInfo_UpdateValidationWorkerRequest.DiscardUnknown(m) } var xxx_messageInfo_UpdateValidationWorkerRequest proto.InternalMessageInfo func (m *UpdateValidationWorkerRequest) GetTaskName() string { if m != nil { return m.TaskName } return "" } func (m *UpdateValidationWorkerRequest) GetBinlogPos() string { if m != nil { return m.BinlogPos } return "" } func (m *UpdateValidationWorkerRequest) GetBinlogGTID() string { if m != nil { return m.BinlogGTID } return "" } func init() { proto.RegisterEnum("pb.TaskOp", TaskOp_name, TaskOp_value) proto.RegisterEnum("pb.Stage", Stage_name, Stage_value) proto.RegisterEnum("pb.UnitType", UnitType_name, UnitType_value) proto.RegisterEnum("pb.RelayOp", RelayOp_name, RelayOp_value) proto.RegisterEnum("pb.SchemaOp", SchemaOp_name, SchemaOp_value) proto.RegisterEnum("pb.V1MetaOp", V1MetaOp_name, V1MetaOp_value) proto.RegisterEnum("pb.ErrorOp", ErrorOp_name, ErrorOp_value) proto.RegisterEnum("pb.ValidatorOp", ValidatorOp_name, ValidatorOp_value) proto.RegisterEnum("pb.ValidateErrorState", ValidateErrorState_name, ValidateErrorState_value) proto.RegisterEnum("pb.ValidationErrOp", ValidationErrOp_name, ValidationErrOp_value) proto.RegisterType((*QueryStatusRequest)(nil), "pb.QueryStatusRequest") 
proto.RegisterType((*CommonWorkerResponse)(nil), "pb.CommonWorkerResponse") proto.RegisterType((*QueryStatusResponse)(nil), "pb.QueryStatusResponse") proto.RegisterType((*CheckStatus)(nil), "pb.CheckStatus") proto.RegisterType((*DumpStatus)(nil), "pb.DumpStatus") proto.RegisterType((*LoadStatus)(nil), "pb.LoadStatus") proto.RegisterType((*ShardingGroup)(nil), "pb.ShardingGroup") proto.RegisterType((*SyncStatus)(nil), "pb.SyncStatus") proto.RegisterType((*SourceStatus)(nil), "pb.SourceStatus") proto.RegisterType((*RelayStatus)(nil), "pb.RelayStatus") proto.RegisterType((*SubTaskStatus)(nil), "pb.SubTaskStatus") proto.RegisterType((*SubTaskStatusList)(nil), "pb.SubTaskStatusList") proto.RegisterType((*CheckError)(nil), "pb.CheckError") proto.RegisterType((*DumpError)(nil), "pb.DumpError") proto.RegisterType((*LoadError)(nil), "pb.LoadError") proto.RegisterType((*SyncSQLError)(nil), "pb.SyncSQLError") proto.RegisterType((*SyncError)(nil), "pb.SyncError") proto.RegisterType((*SourceError)(nil), "pb.SourceError") proto.RegisterType((*RelayError)(nil), "pb.RelayError") proto.RegisterType((*SubTaskError)(nil), "pb.SubTaskError") proto.RegisterType((*SubTaskErrorList)(nil), "pb.SubTaskErrorList") proto.RegisterType((*ProcessResult)(nil), "pb.ProcessResult") proto.RegisterType((*ProcessError)(nil), "pb.ProcessError") proto.RegisterType((*PurgeRelayRequest)(nil), "pb.PurgeRelayRequest") proto.RegisterType((*OperateWorkerSchemaRequest)(nil), "pb.OperateWorkerSchemaRequest") proto.RegisterType((*V1SubTaskMeta)(nil), "pb.V1SubTaskMeta") proto.RegisterType((*OperateV1MetaRequest)(nil), "pb.OperateV1MetaRequest") proto.RegisterType((*OperateV1MetaResponse)(nil), "pb.OperateV1MetaResponse") proto.RegisterMapType((map[string]*V1SubTaskMeta)(nil), "pb.OperateV1MetaResponse.MetaEntry") proto.RegisterType((*HandleWorkerErrorRequest)(nil), "pb.HandleWorkerErrorRequest") proto.RegisterType((*GetWorkerCfgRequest)(nil), "pb.GetWorkerCfgRequest") 
proto.RegisterType((*GetWorkerCfgResponse)(nil), "pb.GetWorkerCfgResponse") proto.RegisterType((*CheckSubtasksCanUpdateRequest)(nil), "pb.CheckSubtasksCanUpdateRequest") proto.RegisterType((*CheckSubtasksCanUpdateResponse)(nil), "pb.CheckSubtasksCanUpdateResponse") proto.RegisterType((*GetValidationStatusRequest)(nil), "pb.GetValidationStatusRequest") proto.RegisterType((*ValidationStatus)(nil), "pb.ValidationStatus") proto.RegisterType((*ValidationTableStatus)(nil), "pb.ValidationTableStatus") proto.RegisterType((*GetValidationStatusResponse)(nil), "pb.GetValidationStatusResponse") proto.RegisterType((*GetValidationErrorRequest)(nil), "pb.GetValidationErrorRequest") proto.RegisterType((*ValidationError)(nil), "pb.ValidationError") proto.RegisterType((*GetValidationErrorResponse)(nil), "pb.GetValidationErrorResponse") proto.RegisterType((*OperateValidationErrorRequest)(nil), "pb.OperateValidationErrorRequest") proto.RegisterType((*OperateValidationErrorResponse)(nil), "pb.OperateValidationErrorResponse") proto.RegisterType((*UpdateValidationWorkerRequest)(nil), "pb.UpdateValidationWorkerRequest") } func init() { proto.RegisterFile("dmworker.proto", fileDescriptor_51a1b9e17fd67b10) } var fileDescriptor_51a1b9e17fd67b10 = []byte{ // 2972 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x3a, 0x4d, 0x6f, 0xdc, 0xd6, 0xb5, 0x43, 0x72, 0x3e, 0xcf, 0x8c, 0x24, 0xea, 0x4a, 0xf6, 0x9b, 0x28, 0xf6, 0x44, 0xa1, 0x83, 0x3c, 0x45, 0xc8, 0x13, 0x12, 0xbd, 0x3c, 0xe4, 0x21, 0x40, 0x9b, 0xc4, 0x92, 0x23, 0x3b, 0x95, 0x23, 0x9b, 0x52, 0xdc, 0x55, 0x81, 0x52, 0x9c, 0xab, 0x11, 0x2b, 0x0e, 0x49, 0x93, 0x1c, 0x09, 0x5a, 0x14, 0xdd, 0x75, 0xdb, 0x6e, 0x5a, 0xa0, 0x45, 0x37, 0x2d, 0xd0, 0x6d, 0x17, 0xfd, 0x01, 0xdd, 0xb5, 0xcd, 0x32, 0xe8, 0xaa, 0xab, 0xa2, 0x48, 0xfe, 0x45, 0x17, 0x45, 0x71, 0xce, 0xbd, 0x97, 0xbc, 0x9c, 0x0f, 0x39, 0x2e, 0xd0, 0x1d, 0xcf, 0xc7, 0x3d, 0xf7, 0xf0, 0x7c, 0x1f, 0xce, 0xc0, 0xf2, 0x70, 0x7c, 0x15, 
0xa7, 0x17, 0x3c, 0xdd, 0x49, 0xd2, 0x38, 0x8f, 0x99, 0x99, 0x9c, 0x3a, 0x5b, 0xc0, 0x9e, 0x4e, 0x78, 0x7a, 0x7d, 0x9c, 0x7b, 0xf9, 0x24, 0x73, 0xf9, 0xf3, 0x09, 0xcf, 0x72, 0xc6, 0xa0, 0x1e, 0x79, 0x63, 0xde, 0x37, 0x36, 0x8d, 0xad, 0x8e, 0x4b, 0xcf, 0x4e, 0x02, 0xeb, 0x7b, 0xf1, 0x78, 0x1c, 0x47, 0xdf, 0x25, 0x19, 0x2e, 0xcf, 0x92, 0x38, 0xca, 0x38, 0xbb, 0x0d, 0xcd, 0x94, 0x67, 0x93, 0x30, 0x27, 0xee, 0xb6, 0x2b, 0x21, 0x66, 0x83, 0x35, 0xce, 0x46, 0x7d, 0x93, 0x44, 0xe0, 0x23, 0x72, 0x66, 0xf1, 0x24, 0xf5, 0x79, 0xdf, 0x22, 0xa4, 0x84, 0x10, 0x2f, 0xf4, 0xea, 0xd7, 0x05, 0x5e, 0x40, 0xce, 0xef, 0x0c, 0x58, 0xab, 0x28, 0xf7, 0xd2, 0x37, 0xbe, 0x07, 0x3d, 0x71, 0x87, 0x90, 0x40, 0xf7, 0x76, 0x77, 0xed, 0x9d, 0xe4, 0x74, 0xe7, 0x58, 0xc3, 0xbb, 0x15, 0x2e, 0xf6, 0x3e, 0x2c, 0x65, 0x93, 0xd3, 0x13, 0x2f, 0xbb, 0x90, 0xc7, 0xea, 0x9b, 0xd6, 0x56, 0x77, 0x77, 0x95, 0x8e, 0xe9, 0x04, 0xb7, 0xca, 0xe7, 0xfc, 0xd6, 0x80, 0xee, 0xde, 0x39, 0xf7, 0x25, 0x8c, 0x8a, 0x26, 0x5e, 0x96, 0xf1, 0xa1, 0x52, 0x54, 0x40, 0x6c, 0x1d, 0x1a, 0x79, 0x9c, 0x7b, 0x21, 0xa9, 0xda, 0x70, 0x05, 0xc0, 0x06, 0x00, 0xd9, 0xc4, 0xf7, 0x79, 0x96, 0x9d, 0x4d, 0x42, 0x52, 0xb5, 0xe1, 0x6a, 0x18, 0x94, 0x76, 0xe6, 0x05, 0x21, 0x1f, 0x92, 0x99, 0x1a, 0xae, 0x84, 0x58, 0x1f, 0x5a, 0x57, 0x5e, 0x1a, 0x05, 0xd1, 0xa8, 0xdf, 0x20, 0x82, 0x02, 0xf1, 0xc4, 0x90, 0xe7, 0x5e, 0x10, 0xf6, 0x9b, 0x9b, 0xc6, 0x56, 0xcf, 0x95, 0x90, 0xf3, 0x4f, 0x03, 0x60, 0x7f, 0x32, 0x4e, 0xa4, 0x9a, 0x9b, 0xd0, 0x25, 0x0d, 0x4e, 0xbc, 0xd3, 0x90, 0x67, 0xa4, 0xab, 0xe5, 0xea, 0x28, 0xb6, 0x05, 0x2b, 0x7e, 0x3c, 0x4e, 0x42, 0x9e, 0xf3, 0xa1, 0xe4, 0x42, 0xd5, 0x0d, 0x77, 0x1a, 0xcd, 0xde, 0x80, 0xa5, 0xb3, 0x20, 0x0a, 0xb2, 0x73, 0x3e, 0xbc, 0x7f, 0x9d, 0x73, 0x61, 0x72, 0xc3, 0xad, 0x22, 0x99, 0x03, 0x3d, 0x85, 0x70, 0xe3, 0xab, 0x8c, 0x5e, 0xc8, 0x70, 0x2b, 0x38, 0xf6, 0x36, 0xac, 0xf2, 0x2c, 0x0f, 0xc6, 0x5e, 0xce, 0x4f, 0x50, 0x15, 0x62, 0x6c, 0x10, 0xe3, 0x2c, 0x01, 0x7d, 0x7f, 0x9a, 0x64, 0xf4, 0x9e, 0x96, 0x8b, 0x8f, 
0x6c, 0x03, 0xda, 0x49, 0x1a, 0x8f, 0x52, 0x9e, 0x65, 0xfd, 0x16, 0x85, 0x44, 0x01, 0x3b, 0x5f, 0x18, 0x00, 0x87, 0xb1, 0x37, 0x94, 0x06, 0x98, 0x51, 0x5a, 0x98, 0x60, 0x4a, 0xe9, 0x01, 0x00, 0xd9, 0x44, 0xb0, 0x98, 0xc4, 0xa2, 0x61, 0x2a, 0x17, 0x5a, 0xd5, 0x0b, 0xf1, 0xec, 0x98, 0xe7, 0xde, 0xfd, 0x20, 0x0a, 0xe3, 0x91, 0x0c, 0x73, 0x0d, 0xc3, 0xde, 0x84, 0xe5, 0x12, 0x3a, 0x38, 0x79, 0xb4, 0x4f, 0x6f, 0xda, 0x71, 0xa7, 0xb0, 0xb3, 0xaf, 0xe9, 0xfc, 0xcc, 0x80, 0xa5, 0xe3, 0x73, 0x2f, 0x1d, 0x06, 0xd1, 0xe8, 0x20, 0x8d, 0x27, 0x09, 0x7a, 0x3d, 0xf7, 0xd2, 0x11, 0xcf, 0x65, 0xfa, 0x4a, 0x08, 0x93, 0x7a, 0x7f, 0xff, 0x10, 0x35, 0xb7, 0x30, 0xa9, 0xf1, 0x59, 0xbc, 0x79, 0x9a, 0xe5, 0x87, 0xb1, 0xef, 0xe5, 0x41, 0x1c, 0x49, 0xc5, 0xab, 0x48, 0x4a, 0xdc, 0xeb, 0xc8, 0xa7, 0xc8, 0xb3, 0x28, 0x71, 0x09, 0xc2, 0x37, 0x9e, 0x44, 0x92, 0xd2, 0x20, 0x4a, 0x01, 0x3b, 0x7f, 0x6c, 0x00, 0x1c, 0x5f, 0x47, 0xfe, 0x54, 0x8c, 0x3d, 0xb8, 0xe4, 0x51, 0x5e, 0x8d, 0x31, 0x81, 0x42, 0x61, 0x22, 0xe4, 0x12, 0x65, 0xdc, 0x02, 0x66, 0x77, 0xa0, 0x93, 0x72, 0x9f, 0x47, 0x39, 0x12, 0x2d, 0x22, 0x96, 0x08, 0x8c, 0xa6, 0xb1, 0x97, 0xe5, 0x3c, 0xad, 0x98, 0xb7, 0x82, 0x63, 0xdb, 0x60, 0xeb, 0xf0, 0x41, 0x1e, 0x0c, 0xa5, 0x89, 0x67, 0xf0, 0x28, 0x8f, 0x5e, 0x42, 0xc9, 0x6b, 0x0a, 0x79, 0x3a, 0x0e, 0xe5, 0xe9, 0x30, 0xc9, 0x13, 0x51, 0x36, 0x83, 0x47, 0x79, 0xa7, 0x61, 0xec, 0x5f, 0x04, 0xd1, 0x88, 0x1c, 0xd0, 0x26, 0x53, 0x55, 0x70, 0xec, 0x5b, 0x60, 0x4f, 0xa2, 0x94, 0x67, 0x71, 0x78, 0xc9, 0x87, 0xe4, 0xc7, 0xac, 0xdf, 0xd1, 0xca, 0x8e, 0xee, 0x61, 0x77, 0x86, 0x55, 0xf3, 0x10, 0x88, 0x4a, 0x23, 0x3d, 0x34, 0x00, 0x38, 0x25, 0x45, 0x4e, 0xae, 0x13, 0xde, 0xef, 0x8a, 0xb8, 0x2b, 0x31, 0xec, 0x1d, 0x58, 0xcb, 0xb8, 0x1f, 0x47, 0xc3, 0xec, 0x3e, 0x3f, 0x0f, 0xa2, 0xe1, 0x63, 0xb2, 0x45, 0xbf, 0x47, 0x26, 0x9e, 0x47, 0xc2, 0x88, 0x21, 0xc5, 0xf7, 0xf7, 0x0f, 0x8f, 0xae, 0x22, 0x9e, 0xf6, 0x97, 0x44, 0xc4, 0x54, 0x90, 0xe8, 0x6e, 0x3f, 0x8e, 0xce, 0xc2, 0xc0, 0xcf, 0x1f, 0x67, 0xa3, 0xfe, 0x32, 
0xf1, 0xe8, 0x28, 0x74, 0x69, 0x5e, 0xa4, 0xf5, 0x8a, 0x70, 0x69, 0x81, 0x28, 0x82, 0xc1, 0x4d, 0xb2, 0xbe, 0xad, 0x05, 0x83, 0xab, 0x07, 0x03, 0x12, 0x57, 0xf5, 0x60, 0x70, 0x45, 0x30, 0x04, 0xf1, 0x49, 0x99, 0xa7, 0x6c, 0xd3, 0xd8, 0xaa, 0xbb, 0x15, 0x1c, 0x3a, 0x6f, 0x38, 0x19, 0x27, 0x8f, 0x8e, 0x34, 0xbe, 0x35, 0xe2, 0x9b, 0xc1, 0x3b, 0xbf, 0x32, 0xa0, 0xa7, 0xf7, 0x0a, 0xad, 0x8b, 0x19, 0x0b, 0xba, 0x98, 0xa9, 0x77, 0x31, 0xf6, 0x56, 0xd1, 0xad, 0x44, 0xf7, 0x21, 0x7f, 0x3e, 0x49, 0x63, 0x2c, 0xeb, 0x2e, 0x11, 0x8a, 0x06, 0xf6, 0x2e, 0x74, 0x53, 0x1e, 0x7a, 0xd7, 0x45, 0xdb, 0x41, 0xfe, 0x15, 0xe4, 0x77, 0x4b, 0xb4, 0xab, 0xf3, 0x38, 0x7f, 0x36, 0xa1, 0xab, 0x11, 0x67, 0x72, 0xc1, 0xf8, 0x86, 0xb9, 0x60, 0x2e, 0xc8, 0x85, 0x4d, 0xa5, 0xd2, 0xe4, 0x74, 0x3f, 0x48, 0x65, 0x79, 0xd0, 0x51, 0x05, 0x47, 0x25, 0xf9, 0x74, 0x14, 0x76, 0x0f, 0x0d, 0xd4, 0x52, 0x6f, 0x1a, 0xcd, 0x76, 0x80, 0x11, 0x6a, 0xcf, 0xcb, 0xfd, 0xf3, 0xcf, 0x13, 0x19, 0x8d, 0x4d, 0x0a, 0xe9, 0x39, 0x14, 0xf6, 0x1a, 0x34, 0xb2, 0xdc, 0x1b, 0x71, 0x4a, 0xbd, 0xe5, 0xdd, 0x0e, 0xa5, 0x0a, 0x22, 0x5c, 0x81, 0xd7, 0x8c, 0xdf, 0x7e, 0x81, 0xf1, 0x9d, 0xdf, 0x5b, 0xb0, 0x54, 0xe9, 0xee, 0xf3, 0xa6, 0xa0, 0xf2, 0x46, 0x73, 0xc1, 0x8d, 0x9b, 0x50, 0x9f, 0x44, 0x81, 0x70, 0xf6, 0xf2, 0x6e, 0x0f, 0xe9, 0x9f, 0x47, 0x41, 0x8e, 0xd9, 0xe6, 0x12, 0x45, 0xd3, 0xa9, 0xfe, 0xa2, 0x80, 0x78, 0x07, 0xd6, 0xca, 0x54, 0xdf, 0xdf, 0x3f, 0x3c, 0x8c, 0xfd, 0x8b, 0xa2, 0x37, 0xcc, 0x23, 0x31, 0x26, 0x66, 0x20, 0x2a, 0x59, 0x0f, 0x6b, 0x62, 0x0a, 0xfa, 0x6f, 0x68, 0xf8, 0x38, 0x95, 0x90, 0x95, 0x64, 0x40, 0x69, 0x63, 0xca, 0xc3, 0x9a, 0x2b, 0xe8, 0xec, 0x0d, 0xa8, 0x63, 0xfc, 0x4b, 0x5b, 0x2d, 0x23, 0x5f, 0x39, 0x26, 0x3c, 0xac, 0xb9, 0x44, 0x45, 0xae, 0x30, 0xf6, 0x86, 0xfd, 0x4e, 0xc9, 0x55, 0xf6, 0x52, 0xe4, 0x42, 0x2a, 0x72, 0x61, 0x0d, 0xa2, 0x7a, 0x24, 0xb9, 0xca, 0x76, 0x80, 0x5c, 0x48, 0x65, 0xef, 0x01, 0x5c, 0x7a, 0x61, 0x30, 0x14, 0xcd, 0xa7, 0x4b, 0xbc, 0xeb, 0xc8, 0xfb, 0xac, 0xc0, 0xca, 0xa8, 0xd7, 0xf8, 
0xee, 0xb7, 0xa1, 0x99, 0x89, 0xf0, 0xff, 0x36, 0xac, 0x56, 0x7c, 0x76, 0x18, 0x64, 0x64, 0x60, 0x41, 0xee, 0x1b, 0x8b, 0x06, 0x37, 0x75, 0x7e, 0x00, 0x40, 0x96, 0x78, 0x90, 0xa6, 0x71, 0xaa, 0x06, 0x48, 0xa3, 0x18, 0x20, 0x9d, 0xbb, 0xd0, 0x41, 0x0b, 0xdc, 0x40, 0xc6, 0x57, 0x5f, 0x44, 0x4e, 0xa0, 0x47, 0xef, 0xfc, 0xf4, 0x70, 0x01, 0x07, 0xdb, 0x85, 0x75, 0x31, 0xc5, 0x89, 0x24, 0x78, 0x12, 0x67, 0x01, 0x59, 0x42, 0xa4, 0xe3, 0x5c, 0x1a, 0xd6, 0x46, 0x8e, 0xe2, 0x8e, 0x9f, 0x1e, 0xaa, 0x39, 0x43, 0xc1, 0xce, 0xff, 0x41, 0x07, 0x6f, 0x14, 0xd7, 0x6d, 0x41, 0x93, 0x08, 0xca, 0x0e, 0x76, 0xe1, 0x04, 0xa9, 0x90, 0x2b, 0xe9, 0xce, 0x4f, 0x0c, 0xe8, 0x8a, 0x22, 0x27, 0x4e, 0xbe, 0x6c, 0x8d, 0xdb, 0xac, 0x1c, 0x57, 0x55, 0x42, 0x97, 0xb8, 0x03, 0x40, 0x65, 0x4a, 0x30, 0xd4, 0xcb, 0xa0, 0x28, 0xb1, 0xae, 0xc6, 0x81, 0x8e, 0x29, 0xa1, 0x39, 0xa6, 0xfd, 0x85, 0x09, 0x3d, 0xe9, 0x52, 0xc1, 0xf2, 0x1f, 0x4a, 0x56, 0x99, 0x4f, 0x75, 0x3d, 0x9f, 0xde, 0x54, 0xf9, 0xd4, 0x28, 0x5f, 0xa3, 0x8c, 0xa2, 0x32, 0x9d, 0xee, 0xc9, 0x74, 0x6a, 0x12, 0xdb, 0x92, 0x4a, 0x27, 0xc5, 0x25, 0xb2, 0xe9, 0x9e, 0xcc, 0xa6, 0x56, 0xc9, 0x54, 0x84, 0x54, 0x91, 0x4c, 0xf7, 0x64, 0x32, 0xb5, 0x4b, 0xa6, 0xc2, 0xcd, 0x2a, 0x97, 0xee, 0xb7, 0xa0, 0x41, 0xee, 0x74, 0x3e, 0x00, 0x5b, 0x37, 0x0d, 0xe5, 0xc4, 0x9b, 0x92, 0x58, 0x09, 0x05, 0x8d, 0xc9, 0x95, 0x67, 0x9f, 0xc3, 0x52, 0xa5, 0x14, 0xe1, 0x04, 0x11, 0x64, 0x7b, 0x5e, 0xe4, 0xf3, 0xb0, 0xd8, 0x63, 0x34, 0x8c, 0x16, 0x64, 0x66, 0x29, 0x59, 0x8a, 0xa8, 0x04, 0x99, 0xb6, 0x8d, 0x58, 0x95, 0x6d, 0xe4, 0x2f, 0x06, 0xf4, 0xf4, 0x03, 0xb8, 0xd0, 0x3c, 0x48, 0xd3, 0xbd, 0x78, 0x28, 0xbc, 0xd9, 0x70, 0x15, 0x88, 0xa1, 0x8f, 0x8f, 0xa1, 0x97, 0x65, 0x32, 0x02, 0x0b, 0x58, 0xd2, 0x8e, 0xfd, 0x38, 0x51, 0xfb, 0x65, 0x01, 0x4b, 0xda, 0x21, 0xbf, 0xe4, 0xa1, 0x6c, 0x50, 0x05, 0x8c, 0xb7, 0x3d, 0xe6, 0x59, 0x86, 0x61, 0x22, 0xea, 0xaa, 0x02, 0xf1, 0x94, 0xeb, 0x5d, 0xed, 0x79, 0x93, 0x8c, 0xcb, 0x19, 0xb0, 0x80, 0xd1, 0x2c, 0xb8, 0x07, 0x7b, 0x69, 0x3c, 0x89, 
0xd4, 0xe4, 0xa7, 0x61, 0x9c, 0x2b, 0x58, 0x7d, 0x32, 0x49, 0x47, 0x9c, 0x82, 0x58, 0xad, 0xd5, 0x1b, 0xd0, 0x0e, 0x22, 0xcf, 0xcf, 0x83, 0x4b, 0x2e, 0x2d, 0x59, 0xc0, 0x18, 0xbf, 0x79, 0x30, 0xe6, 0x72, 0xf4, 0xa5, 0x67, 0xe4, 0x3f, 0x0b, 0x42, 0x4e, 0x71, 0x2d, 0x5f, 0x49, 0xc1, 0x94, 0xa2, 0xa2, 0x27, 0xcb, 0xa5, 0x59, 0x40, 0xce, 0x2f, 0x4d, 0xd8, 0x38, 0x4a, 0x78, 0xea, 0xe5, 0x5c, 0x2c, 0xea, 0xc7, 0xfe, 0x39, 0x1f, 0x7b, 0x4a, 0x85, 0x3b, 0x60, 0xc6, 0x09, 0x5d, 0x2e, 0xe3, 0x5d, 0x90, 0x8f, 0x12, 0xd7, 0x8c, 0x13, 0x52, 0xc2, 0xcb, 0x2e, 0xa4, 0x6d, 0xe9, 0x79, 0xe1, 0xd6, 0xbe, 0x01, 0xed, 0xa1, 0x97, 0x7b, 0xa7, 0x5e, 0xc6, 0x95, 0x4d, 0x15, 0x4c, 0x0b, 0x2e, 0xee, 0x83, 0xd2, 0xa2, 0x02, 0x20, 0x49, 0x74, 0x9b, 0xb4, 0xa6, 0x84, 0x90, 0xfb, 0x2c, 0x9c, 0x64, 0xe7, 0x64, 0xc6, 0xb6, 0x2b, 0x00, 0xd4, 0xa5, 0x88, 0xf9, 0xb6, 0x6c, 0x17, 0x03, 0x80, 0xb3, 0x34, 0x1e, 0x8b, 0xc2, 0x42, 0x0d, 0xa8, 0xed, 0x6a, 0x18, 0x45, 0x3f, 0x11, 0xeb, 0x0f, 0x94, 0x74, 0x81, 0x71, 0x72, 0x58, 0x7a, 0xf6, 0xae, 0x0c, 0xfb, 0xc7, 0x3c, 0xf7, 0xd8, 0x86, 0x66, 0x0e, 0x40, 0x73, 0x20, 0x45, 0x1a, 0xe3, 0x85, 0xd5, 0x43, 0x95, 0x1c, 0x4b, 0x2b, 0x39, 0xca, 0x82, 0x75, 0x0a, 0x71, 0x7a, 0x76, 0xde, 0x83, 0x75, 0xe9, 0x91, 0x67, 0xef, 0xe2, 0xad, 0x0b, 0x7d, 0x21, 0xc8, 0xe2, 0x7a, 0xe7, 0x4f, 0x06, 0xdc, 0x9a, 0x3a, 0xf6, 0xd2, 0xdf, 0x3f, 0xde, 0x87, 0x3a, 0x2e, 0x90, 0x7d, 0x8b, 0x52, 0xf3, 0x1e, 0xde, 0x31, 0x57, 0xe4, 0x0e, 0x02, 0x0f, 0xa2, 0x3c, 0xbd, 0x76, 0xe9, 0xc0, 0xc6, 0xa7, 0xd0, 0x29, 0x50, 0x28, 0xf7, 0x82, 0x5f, 0xab, 0xea, 0x7b, 0xc1, 0xaf, 0x71, 0xa2, 0xb8, 0xf4, 0xc2, 0x89, 0x30, 0x8d, 0x6c, 0xb0, 0x15, 0xc3, 0xba, 0x82, 0xfe, 0x81, 0xf9, 0xff, 0x86, 0xf3, 0x43, 0xe8, 0x3f, 0xf4, 0xa2, 0x61, 0x28, 0xe3, 0x51, 0x14, 0x05, 0x69, 0x82, 0x57, 0x35, 0x13, 0x74, 0x51, 0x0a, 0x51, 0x6f, 0x88, 0xc6, 0x3b, 0xd0, 0x39, 0x55, 0xed, 0x50, 0x1a, 0xbe, 0x44, 0x50, 0xcc, 0x3c, 0x0f, 0x33, 0xb9, 0xa6, 0xd2, 0xb3, 0x73, 0x0b, 0xd6, 0x0e, 0x78, 0x2e, 0xee, 0xde, 0x3b, 0x1b, 
0xc9, 0x9b, 0x9d, 0x2d, 0x58, 0xaf, 0xa2, 0xa5, 0x71, 0x6d, 0xb0, 0xfc, 0xb3, 0xa2, 0xd5, 0xf8, 0x67, 0x23, 0xe7, 0x18, 0xee, 0x8a, 0x69, 0x69, 0x72, 0x8a, 0x2a, 0x60, 0xe9, 0xfb, 0x3c, 0x19, 0x7a, 0x39, 0x57, 0x2f, 0xb1, 0x0b, 0xeb, 0x99, 0xa0, 0xed, 0x9d, 0x8d, 0x4e, 0xe2, 0x71, 0x78, 0x9c, 0xa7, 0x41, 0xa4, 0x64, 0xcc, 0xa5, 0x39, 0x87, 0x30, 0x58, 0x24, 0x54, 0x2a, 0xd2, 0x87, 0x96, 0xfc, 0xf8, 0x23, 0xdd, 0xac, 0xc0, 0x59, 0x3f, 0x3b, 0x23, 0xd8, 0x38, 0xe0, 0xf9, 0xcc, 0xcc, 0x54, 0x96, 0x1d, 0xbc, 0xe3, 0xb3, 0xb2, 0x3d, 0x16, 0x30, 0xfb, 0x1f, 0xe8, 0x9d, 0x05, 0x61, 0xce, 0x53, 0xb9, 0x73, 0xcc, 0xc4, 0x7a, 0x85, 0xec, 0xfc, 0xcd, 0x02, 0x7b, 0xfa, 0x9a, 0xc2, 0x4f, 0xc6, 0xdc, 0xaa, 0x61, 0x56, 0xaa, 0x06, 0x83, 0xfa, 0x18, 0x0b, 0xbb, 0xcc, 0x19, 0x7c, 0x2e, 0x13, 0xad, 0xbe, 0x20, 0xd1, 0xb6, 0x60, 0x45, 0x4e, 0x7f, 0xb1, 0xda, 0x6b, 0xe4, 0x02, 0x31, 0x85, 0xc6, 0x81, 0x79, 0x0a, 0x45, 0xeb, 0x86, 0xa8, 0x37, 0xf3, 0x48, 0xda, 0x34, 0xde, 0xfa, 0x06, 0xd3, 0x78, 0x22, 0x08, 0xe2, 0x13, 0x95, 0x34, 0x59, 0x5b, 0x08, 0x9f, 0x43, 0x62, 0x6f, 0xc3, 0x6a, 0xc2, 0x23, 0x5c, 0xdc, 0x35, 0xfe, 0x0e, 0xf1, 0xcf, 0x12, 0xf0, 0x35, 0xa9, 0x55, 0x6a, 0xbc, 0x20, 0x5e, 0x73, 0x0a, 0x8d, 0x1b, 0x9c, 0x3f, 0xc9, 0xe3, 0x4b, 0xb5, 0xaa, 0x61, 0x32, 0x88, 0xe5, 0x7e, 0x06, 0x8f, 0x3a, 0x54, 0x70, 0x64, 0x90, 0x9e, 0xd0, 0x61, 0x86, 0xe0, 0xfc, 0xc6, 0x80, 0x5b, 0xa5, 0x83, 0xe9, 0xa3, 0xde, 0x0b, 0xf6, 0xde, 0x0d, 0x68, 0x67, 0xa9, 0x4f, 0x9c, 0xaa, 0x27, 0x2b, 0x98, 0x7a, 0x44, 0x96, 0x0b, 0x9a, 0x6c, 0x60, 0x0a, 0x7e, 0xb1, 0xd7, 0xfb, 0xd0, 0x1a, 0x57, 0x1b, 0xb3, 0x04, 0x9d, 0x3f, 0x18, 0xf0, 0xea, 0xdc, 0x78, 0xff, 0x37, 0x3e, 0x10, 0x43, 0x11, 0x14, 0x99, 0x2c, 0x93, 0x37, 0xef, 0x1f, 0x38, 0xc9, 0x7c, 0x08, 0x4b, 0x79, 0x69, 0x19, 0xae, 0x3e, 0x10, 0xbf, 0x52, 0x3d, 0xa8, 0x19, 0xcf, 0xad, 0xf2, 0x3b, 0x17, 0xf0, 0x4a, 0x45, 0xff, 0x4a, 0x4d, 0xdc, 0xa5, 0xf9, 0x1e, 0x79, 0xb9, 0xac, 0x8c, 0xb7, 0x35, 0xc1, 0x62, 0x9e, 0x26, 0xaa, 0x5b, 0xf0, 0x55, 0x52, 0xdc, 
0xac, 0xa6, 0xb8, 0xf3, 0x6b, 0x13, 0x56, 0xa6, 0xae, 0x62, 0xcb, 0x60, 0x06, 0x43, 0xe9, 0x48, 0x33, 0x18, 0x2e, 0x4c, 0x57, 0xdd, 0xb9, 0xd6, 0x94, 0x73, 0xb1, 0x40, 0xa5, 0xfe, 0xbe, 0x97, 0x7b, 0xb2, 0xff, 0x2b, 0xb0, 0xe2, 0xf6, 0xc6, 0x94, 0xdb, 0xfb, 0xd0, 0x1a, 0x66, 0x39, 0x9d, 0x12, 0x59, 0xa9, 0x40, 0x2c, 0xed, 0x14, 0xe7, 0xf4, 0xa9, 0x4a, 0x4c, 0x54, 0x25, 0x82, 0xed, 0x14, 0x4b, 0x5d, 0xfb, 0x46, 0x9b, 0x48, 0xae, 0x62, 0x9e, 0xea, 0xc8, 0xa2, 0x84, 0xf3, 0x94, 0x16, 0x51, 0x50, 0x8d, 0xa8, 0xe7, 0x53, 0x05, 0x54, 0x3a, 0xe4, 0xa5, 0xe3, 0xe9, 0x2d, 0x35, 0x66, 0x8b, 0x50, 0x5a, 0xab, 0x46, 0x44, 0x65, 0xd2, 0xfe, 0xb9, 0x01, 0x77, 0x55, 0x33, 0x9e, 0x1f, 0x08, 0xf7, 0xb4, 0xe6, 0x38, 0x2b, 0x49, 0x36, 0x49, 0x9a, 0xcf, 0x3f, 0x0e, 0x43, 0xb1, 0x58, 0x99, 0x6a, 0x3e, 0x57, 0x98, 0x4a, 0x64, 0x58, 0x53, 0xc5, 0x7f, 0x9d, 0xb4, 0x7d, 0x24, 0x7e, 0x50, 0xa8, 0xbb, 0x02, 0x70, 0x3e, 0x85, 0xc1, 0x22, 0xbd, 0x5e, 0xd6, 0x1e, 0xce, 0x35, 0xdc, 0x15, 0x6d, 0xad, 0x14, 0xa5, 0x7e, 0x3e, 0x7a, 0x71, 0x6f, 0xaa, 0xf4, 0x7a, 0x73, 0xba, 0xd7, 0x17, 0x9f, 0x36, 0xe9, 0x73, 0xb9, 0xa5, 0x7f, 0xda, 0x44, 0xcc, 0xf6, 0x05, 0x34, 0xc5, 0x30, 0xc7, 0x96, 0xa0, 0xf3, 0x28, 0xa2, 0xf4, 0x3d, 0x4a, 0xec, 0x1a, 0x6b, 0x43, 0xfd, 0x38, 0x8f, 0x13, 0xdb, 0x60, 0x1d, 0x68, 0x3c, 0xc1, 0x69, 0xde, 0x36, 0x19, 0x40, 0x13, 0xab, 0xfd, 0x98, 0xdb, 0x16, 0xa2, 0x8f, 0x73, 0x2f, 0xcd, 0xed, 0x3a, 0xa2, 0x85, 0xfe, 0x76, 0x83, 0x2d, 0x03, 0x7c, 0x3c, 0xc9, 0x63, 0xc9, 0xd6, 0x44, 0xda, 0x3e, 0x0f, 0x79, 0xce, 0xed, 0xd6, 0xf6, 0x8f, 0xe8, 0xc8, 0x08, 0xc7, 0x87, 0x9e, 0xbc, 0x8b, 0x60, 0xbb, 0xc6, 0x5a, 0x60, 0x7d, 0xc6, 0xaf, 0x6c, 0x83, 0x75, 0xa1, 0xe5, 0x4e, 0xa2, 0x28, 0x88, 0x46, 0xe2, 0x3e, 0xba, 0x7a, 0x68, 0x5b, 0x48, 0x40, 0x85, 0x12, 0x3e, 0xb4, 0xeb, 0xac, 0x07, 0xed, 0x4f, 0xe4, 0xcf, 0x0e, 0x76, 0x03, 0x49, 0xc8, 0x86, 0x67, 0x9a, 0x48, 0xa2, 0xcb, 0x11, 0x6a, 0x21, 0x44, 0xa7, 0x10, 0x6a, 0x6f, 0x1f, 0x41, 0x5b, 0x6d, 0xae, 0x6c, 0x05, 0xba, 0x52, 0x07, 0x44, 0xd9, 
0x35, 0x7c, 0x21, 0x1a, 0x36, 0x6c, 0x03, 0x5f, 0x1e, 0x77, 0x50, 0xdb, 0xc4, 0x27, 0x5c, 0x34, 0x6d, 0x8b, 0x0c, 0x72, 0x1d, 0xf9, 0x76, 0x1d, 0x19, 0x69, 0x61, 0xb1, 0x87, 0xdb, 0x8f, 0xa1, 0x45, 0x8f, 0x47, 0x38, 0x87, 0x2d, 0x4b, 0x79, 0x12, 0x63, 0xd7, 0xd0, 0xa6, 0x78, 0xbb, 0xe0, 0x36, 0xd0, 0x36, 0xf4, 0x3a, 0x02, 0x36, 0x51, 0x05, 0x61, 0x27, 0x81, 0xb0, 0xb6, 0x7f, 0x6c, 0x40, 0x5b, 0xad, 0x1a, 0x6c, 0x0d, 0x56, 0x94, 0x91, 0x24, 0x4a, 0x48, 0x3c, 0xe0, 0xb9, 0x40, 0xd8, 0x06, 0x5d, 0x50, 0x80, 0x26, 0xda, 0xd5, 0xe5, 0xe3, 0xf8, 0x92, 0x4b, 0x8c, 0x85, 0x57, 0xe2, 0x66, 0x2b, 0xe1, 0x3a, 0x1e, 0x40, 0x98, 0xaa, 0x8c, 0xdd, 0x60, 0xb7, 0x81, 0x21, 0xf8, 0x38, 0x18, 0x61, 0x24, 0x8b, 0xf9, 0x3f, 0xb3, 0x9b, 0xdb, 0x1f, 0x41, 0x5b, 0x8d, 0xd9, 0x9a, 0x1e, 0x0a, 0x55, 0xe8, 0x21, 0x10, 0xb6, 0x51, 0x5e, 0x2c, 0x31, 0xe6, 0xf6, 0x33, 0x5a, 0x4f, 0x71, 0x4a, 0xd5, 0x2c, 0x23, 0x31, 0x32, 0xbc, 0x2e, 0x82, 0x44, 0x3a, 0x9c, 0x27, 0xa1, 0xe7, 0x17, 0x01, 0x76, 0xc9, 0xd3, 0xdc, 0xb6, 0xf0, 0xf9, 0x51, 0xf4, 0x03, 0xee, 0x63, 0x84, 0xa1, 0x1b, 0x82, 0x2c, 0xb7, 0x1b, 0xdb, 0x87, 0xd0, 0x7d, 0xa6, 0x7a, 0xcc, 0x51, 0x82, 0x2f, 0xa0, 0x94, 0x2b, 0xb1, 0x76, 0x0d, 0xef, 0xa4, 0xe8, 0x2c, 0xb0, 0xb6, 0xc1, 0x56, 0x61, 0x09, 0xbd, 0x51, 0xa2, 0xcc, 0xed, 0xa7, 0xc0, 0x66, 0xab, 0x23, 0x1a, 0xad, 0x54, 0xd8, 0xae, 0xa1, 0x26, 0x9f, 0xf1, 0x2b, 0x7c, 0x26, 0x1f, 0x3e, 0x1a, 0x45, 0x71, 0xca, 0x89, 0xa6, 0x7c, 0x48, 0xdf, 0x17, 0x11, 0x61, 0x6d, 0x3f, 0x9b, 0xea, 0x23, 0x47, 0x89, 0x16, 0xee, 0x04, 0xdb, 0x35, 0x0a, 0x3e, 0x92, 0x22, 0x10, 0xd2, 0x80, 0x24, 0x46, 0x60, 0x4c, 0xbc, 0x68, 0x2f, 0xe4, 0x5e, 0x2a, 0x60, 0x6b, 0xf7, 0x1f, 0x4d, 0x68, 0x8a, 0xaa, 0xc0, 0x3e, 0x82, 0xae, 0xf6, 0x8b, 0x2f, 0xa3, 0x22, 0x3f, 0xfb, 0xfb, 0xf4, 0xc6, 0x7f, 0xcd, 0xe0, 0x45, 0x65, 0x72, 0x6a, 0xec, 0x43, 0x80, 0x72, 0xf1, 0x66, 0xb7, 0x68, 0x9a, 0x9b, 0x5e, 0xc4, 0x37, 0xfa, 0xf4, 0xc9, 0x66, 0xce, 0xaf, 0xd9, 0x4e, 0x8d, 0x7d, 0x07, 0x96, 0x64, 0xf9, 0x13, 0xa1, 0xc5, 0x06, 0xda, 
0xda, 0x34, 0x67, 0xa5, 0xbe, 0x51, 0xd8, 0x27, 0x85, 0x30, 0x11, 0x3e, 0xac, 0x3f, 0x67, 0x07, 0x13, 0x62, 0x5e, 0x59, 0xb8, 0x9d, 0x39, 0x35, 0x76, 0x00, 0x5d, 0xb1, 0x43, 0x89, 0xa2, 0x7e, 0x07, 0x79, 0x17, 0x2d, 0x55, 0x37, 0x2a, 0xb4, 0x07, 0x3d, 0x7d, 0xed, 0x61, 0x64, 0xc9, 0x39, 0xfb, 0x91, 0x10, 0x32, 0x6f, 0x43, 0x72, 0x6a, 0xcc, 0x83, 0xdb, 0xf3, 0x97, 0x17, 0xf6, 0x7a, 0xf9, 0x6d, 0x79, 0xc1, 0xb6, 0xb4, 0xe1, 0xdc, 0xc4, 0x52, 0x5c, 0xf1, 0x3d, 0xe8, 0x17, 0x97, 0x17, 0x61, 0x2d, 0xa3, 0x62, 0x20, 0x55, 0x5b, 0xb0, 0xef, 0x6c, 0xbc, 0xb6, 0x90, 0x5e, 0x88, 0x3f, 0x81, 0xd5, 0x92, 0x21, 0x16, 0xe6, 0x63, 0x77, 0x67, 0xce, 0x55, 0xcc, 0x3a, 0x58, 0x44, 0x2e, 0xa4, 0x7e, 0xbf, 0xdc, 0xd8, 0xab, 0x92, 0x5f, 0xd7, 0x7d, 0x3b, 0x5f, 0xba, 0x73, 0x13, 0x4b, 0x71, 0xc3, 0x13, 0x58, 0xa9, 0xf4, 0x53, 0x25, 0xfb, 0xc6, 0x26, 0x7b, 0x53, 0x40, 0xdc, 0xef, 0x7f, 0xf1, 0xd5, 0xc0, 0xf8, 0xf2, 0xab, 0x81, 0xf1, 0xf7, 0xaf, 0x06, 0xc6, 0x4f, 0xbf, 0x1e, 0xd4, 0xbe, 0xfc, 0x7a, 0x50, 0xfb, 0xeb, 0xd7, 0x83, 0xda, 0x69, 0x93, 0xfe, 0x25, 0xf2, 0xbf, 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x07, 0x2d, 0x0f, 0x37, 0x22, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // WorkerClient is the client API for Worker service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type WorkerClient interface { QueryStatus(ctx context.Context, in *QueryStatusRequest, opts ...grpc.CallOption) (*QueryStatusResponse, error) // PurgeRelay purges relay log files for this dm-worker PurgeRelay(ctx context.Context, in *PurgeRelayRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) // Operate (get/set/remove) schema for a specified table in tracker. // a `set`/`remove` operation should be an one-time operation (only take effect once), // so we use a gRPC method rather than a etcd operation now (no persistent operation state). OperateSchema(ctx context.Context, in *OperateWorkerSchemaRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) OperateV1Meta(ctx context.Context, in *OperateV1MetaRequest, opts ...grpc.CallOption) (*OperateV1MetaResponse, error) HandleError(ctx context.Context, in *HandleWorkerErrorRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) GetWorkerCfg(ctx context.Context, in *GetWorkerCfgRequest, opts ...grpc.CallOption) (*GetWorkerCfgResponse, error) // only some fields of the configuration of the subtask in the sync phase can be updated CheckSubtasksCanUpdate(ctx context.Context, in *CheckSubtasksCanUpdateRequest, opts ...grpc.CallOption) (*CheckSubtasksCanUpdateResponse, error) GetWorkerValidatorStatus(ctx context.Context, in *GetValidationStatusRequest, opts ...grpc.CallOption) (*GetValidationStatusResponse, error) GetValidatorError(ctx context.Context, in *GetValidationErrorRequest, opts ...grpc.CallOption) (*GetValidationErrorResponse, error) OperateValidatorError(ctx context.Context, in *OperateValidationErrorRequest, opts ...grpc.CallOption) (*OperateValidationErrorResponse, error) UpdateValidator(ctx context.Context, in *UpdateValidationWorkerRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) } type workerClient struct { cc *grpc.ClientConn } func NewWorkerClient(cc *grpc.ClientConn) WorkerClient { return &workerClient{cc} } func (c *workerClient) QueryStatus(ctx 
context.Context, in *QueryStatusRequest, opts ...grpc.CallOption) (*QueryStatusResponse, error) { out := new(QueryStatusResponse) err := c.cc.Invoke(ctx, "/pb.Worker/QueryStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) PurgeRelay(ctx context.Context, in *PurgeRelayRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) { out := new(CommonWorkerResponse) err := c.cc.Invoke(ctx, "/pb.Worker/PurgeRelay", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) OperateSchema(ctx context.Context, in *OperateWorkerSchemaRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) { out := new(CommonWorkerResponse) err := c.cc.Invoke(ctx, "/pb.Worker/OperateSchema", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) OperateV1Meta(ctx context.Context, in *OperateV1MetaRequest, opts ...grpc.CallOption) (*OperateV1MetaResponse, error) { out := new(OperateV1MetaResponse) err := c.cc.Invoke(ctx, "/pb.Worker/OperateV1Meta", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) HandleError(ctx context.Context, in *HandleWorkerErrorRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) { out := new(CommonWorkerResponse) err := c.cc.Invoke(ctx, "/pb.Worker/HandleError", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) GetWorkerCfg(ctx context.Context, in *GetWorkerCfgRequest, opts ...grpc.CallOption) (*GetWorkerCfgResponse, error) { out := new(GetWorkerCfgResponse) err := c.cc.Invoke(ctx, "/pb.Worker/GetWorkerCfg", in, out, opts...) 
if err != nil { return nil, err } return out, nil } func (c *workerClient) CheckSubtasksCanUpdate(ctx context.Context, in *CheckSubtasksCanUpdateRequest, opts ...grpc.CallOption) (*CheckSubtasksCanUpdateResponse, error) { out := new(CheckSubtasksCanUpdateResponse) err := c.cc.Invoke(ctx, "/pb.Worker/CheckSubtasksCanUpdate", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) GetWorkerValidatorStatus(ctx context.Context, in *GetValidationStatusRequest, opts ...grpc.CallOption) (*GetValidationStatusResponse, error) { out := new(GetValidationStatusResponse) err := c.cc.Invoke(ctx, "/pb.Worker/GetWorkerValidatorStatus", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) GetValidatorError(ctx context.Context, in *GetValidationErrorRequest, opts ...grpc.CallOption) (*GetValidationErrorResponse, error) { out := new(GetValidationErrorResponse) err := c.cc.Invoke(ctx, "/pb.Worker/GetValidatorError", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) OperateValidatorError(ctx context.Context, in *OperateValidationErrorRequest, opts ...grpc.CallOption) (*OperateValidationErrorResponse, error) { out := new(OperateValidationErrorResponse) err := c.cc.Invoke(ctx, "/pb.Worker/OperateValidatorError", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *workerClient) UpdateValidator(ctx context.Context, in *UpdateValidationWorkerRequest, opts ...grpc.CallOption) (*CommonWorkerResponse, error) { out := new(CommonWorkerResponse) err := c.cc.Invoke(ctx, "/pb.Worker/UpdateValidator", in, out, opts...) if err != nil { return nil, err } return out, nil } // WorkerServer is the server API for Worker service. 
type WorkerServer interface { QueryStatus(context.Context, *QueryStatusRequest) (*QueryStatusResponse, error) // PurgeRelay purges relay log files for this dm-worker PurgeRelay(context.Context, *PurgeRelayRequest) (*CommonWorkerResponse, error) // Operate (get/set/remove) schema for a specified table in tracker. // a `set`/`remove` operation should be an one-time operation (only take effect once), // so we use a gRPC method rather than a etcd operation now (no persistent operation state). OperateSchema(context.Context, *OperateWorkerSchemaRequest) (*CommonWorkerResponse, error) OperateV1Meta(context.Context, *OperateV1MetaRequest) (*OperateV1MetaResponse, error) HandleError(context.Context, *HandleWorkerErrorRequest) (*CommonWorkerResponse, error) GetWorkerCfg(context.Context, *GetWorkerCfgRequest) (*GetWorkerCfgResponse, error) // only some fields of the configuration of the subtask in the sync phase can be updated CheckSubtasksCanUpdate(context.Context, *CheckSubtasksCanUpdateRequest) (*CheckSubtasksCanUpdateResponse, error) GetWorkerValidatorStatus(context.Context, *GetValidationStatusRequest) (*GetValidationStatusResponse, error) GetValidatorError(context.Context, *GetValidationErrorRequest) (*GetValidationErrorResponse, error) OperateValidatorError(context.Context, *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) UpdateValidator(context.Context, *UpdateValidationWorkerRequest) (*CommonWorkerResponse, error) } // UnimplementedWorkerServer can be embedded to have forward compatible implementations. 
type UnimplementedWorkerServer struct { } func (*UnimplementedWorkerServer) QueryStatus(ctx context.Context, req *QueryStatusRequest) (*QueryStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method QueryStatus not implemented") } func (*UnimplementedWorkerServer) PurgeRelay(ctx context.Context, req *PurgeRelayRequest) (*CommonWorkerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method PurgeRelay not implemented") } func (*UnimplementedWorkerServer) OperateSchema(ctx context.Context, req *OperateWorkerSchemaRequest) (*CommonWorkerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateSchema not implemented") } func (*UnimplementedWorkerServer) OperateV1Meta(ctx context.Context, req *OperateV1MetaRequest) (*OperateV1MetaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateV1Meta not implemented") } func (*UnimplementedWorkerServer) HandleError(ctx context.Context, req *HandleWorkerErrorRequest) (*CommonWorkerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method HandleError not implemented") } func (*UnimplementedWorkerServer) GetWorkerCfg(ctx context.Context, req *GetWorkerCfgRequest) (*GetWorkerCfgResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetWorkerCfg not implemented") } func (*UnimplementedWorkerServer) CheckSubtasksCanUpdate(ctx context.Context, req *CheckSubtasksCanUpdateRequest) (*CheckSubtasksCanUpdateResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CheckSubtasksCanUpdate not implemented") } func (*UnimplementedWorkerServer) GetWorkerValidatorStatus(ctx context.Context, req *GetValidationStatusRequest) (*GetValidationStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetWorkerValidatorStatus not implemented") } func (*UnimplementedWorkerServer) GetValidatorError(ctx context.Context, req *GetValidationErrorRequest) (*GetValidationErrorResponse, error) { return 
nil, status.Errorf(codes.Unimplemented, "method GetValidatorError not implemented") } func (*UnimplementedWorkerServer) OperateValidatorError(ctx context.Context, req *OperateValidationErrorRequest) (*OperateValidationErrorResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method OperateValidatorError not implemented") } func (*UnimplementedWorkerServer) UpdateValidator(ctx context.Context, req *UpdateValidationWorkerRequest) (*CommonWorkerResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateValidator not implemented") } func RegisterWorkerServer(s *grpc.Server, srv WorkerServer) { s.RegisterService(&_Worker_serviceDesc, srv) } func _Worker_QueryStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(QueryStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).QueryStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/QueryStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).QueryStatus(ctx, req.(*QueryStatusRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_PurgeRelay_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PurgeRelayRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).PurgeRelay(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/PurgeRelay", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).PurgeRelay(ctx, req.(*PurgeRelayRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_OperateSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateWorkerSchemaRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).OperateSchema(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/OperateSchema", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).OperateSchema(ctx, req.(*OperateWorkerSchemaRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_OperateV1Meta_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateV1MetaRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).OperateV1Meta(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/OperateV1Meta", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).OperateV1Meta(ctx, req.(*OperateV1MetaRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_HandleError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HandleWorkerErrorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).HandleError(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/HandleError", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).HandleError(ctx, req.(*HandleWorkerErrorRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_GetWorkerCfg_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetWorkerCfgRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return 
srv.(WorkerServer).GetWorkerCfg(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/GetWorkerCfg", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).GetWorkerCfg(ctx, req.(*GetWorkerCfgRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_CheckSubtasksCanUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CheckSubtasksCanUpdateRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).CheckSubtasksCanUpdate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/CheckSubtasksCanUpdate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).CheckSubtasksCanUpdate(ctx, req.(*CheckSubtasksCanUpdateRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_GetWorkerValidatorStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetValidationStatusRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).GetWorkerValidatorStatus(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/GetWorkerValidatorStatus", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).GetWorkerValidatorStatus(ctx, req.(*GetValidationStatusRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_GetValidatorError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetValidationErrorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).GetValidatorError(ctx, in) } info := 
&grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/GetValidatorError", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).GetValidatorError(ctx, req.(*GetValidationErrorRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_OperateValidatorError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(OperateValidationErrorRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).OperateValidatorError(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/OperateValidatorError", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).OperateValidatorError(ctx, req.(*OperateValidationErrorRequest)) } return interceptor(ctx, in, info, handler) } func _Worker_UpdateValidator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateValidationWorkerRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(WorkerServer).UpdateValidator(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pb.Worker/UpdateValidator", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServer).UpdateValidator(ctx, req.(*UpdateValidationWorkerRequest)) } return interceptor(ctx, in, info, handler) } var _Worker_serviceDesc = grpc.ServiceDesc{ ServiceName: "pb.Worker", HandlerType: (*WorkerServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "QueryStatus", Handler: _Worker_QueryStatus_Handler, }, { MethodName: "PurgeRelay", Handler: _Worker_PurgeRelay_Handler, }, { MethodName: "OperateSchema", Handler: _Worker_OperateSchema_Handler, }, { MethodName: "OperateV1Meta", Handler: _Worker_OperateV1Meta_Handler, }, { 
MethodName: "HandleError", Handler: _Worker_HandleError_Handler, }, { MethodName: "GetWorkerCfg", Handler: _Worker_GetWorkerCfg_Handler, }, { MethodName: "CheckSubtasksCanUpdate", Handler: _Worker_CheckSubtasksCanUpdate_Handler, }, { MethodName: "GetWorkerValidatorStatus", Handler: _Worker_GetWorkerValidatorStatus_Handler, }, { MethodName: "GetValidatorError", Handler: _Worker_GetValidatorError_Handler, }, { MethodName: "OperateValidatorError", Handler: _Worker_OperateValidatorError_Handler, }, { MethodName: "UpdateValidator", Handler: _Worker_UpdateValidator_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "dmworker.proto", } func (m *QueryStatusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *QueryStatusRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *QueryStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *CommonWorkerResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CommonWorkerResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *CommonWorkerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Worker) > 0 { i -= len(m.Worker) copy(dAtA[i:], m.Worker) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Worker))) i-- dAtA[i] = 0x22 } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Source))) i-- 
dAtA[i] = 0x1a } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *QueryStatusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *QueryStatusResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *QueryStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SubTaskStatus) > 0 { for iNdEx := len(m.SubTaskStatus) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.SubTaskStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } if m.SourceStatus != nil { { size, err := m.SourceStatus.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *CheckStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *CheckStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Detail) > 0 { i -= len(m.Detail) copy(dAtA[i:], m.Detail) i = encodeVarintDmworker(dAtA, i, 
uint64(len(m.Detail))) i-- dAtA[i] = 0x32 } if m.Warning != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Warning)) i-- dAtA[i] = 0x28 } if m.Failed != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Failed)) i-- dAtA[i] = 0x20 } if m.Successful != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Successful)) i-- dAtA[i] = 0x18 } if m.Total != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Total)) i-- dAtA[i] = 0x10 } if m.Passed { i-- if m.Passed { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *DumpStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *DumpStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *DumpStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Progress) > 0 { i -= len(m.Progress) copy(dAtA[i:], m.Progress) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Progress))) i-- dAtA[i] = 0x3a } if m.Bps != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Bps)) i-- dAtA[i] = 0x30 } if m.EstimateTotalRows != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.EstimateTotalRows)))) i-- dAtA[i] = 0x29 } if m.FinishedRows != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.FinishedRows)))) i-- dAtA[i] = 0x21 } if m.FinishedBytes != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.FinishedBytes)))) i-- dAtA[i] = 0x19 } if m.CompletedTables != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CompletedTables)))) i-- dAtA[i] = 0x11 } if m.TotalTables != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.TotalTables)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *LoadStatus) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LoadStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *LoadStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Bps != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Bps)) i-- dAtA[i] = 0x30 } if len(m.MetaBinlogGTID) > 0 { i -= len(m.MetaBinlogGTID) copy(dAtA[i:], m.MetaBinlogGTID) i = encodeVarintDmworker(dAtA, i, uint64(len(m.MetaBinlogGTID))) i-- dAtA[i] = 0x2a } if len(m.MetaBinlog) > 0 { i -= len(m.MetaBinlog) copy(dAtA[i:], m.MetaBinlog) i = encodeVarintDmworker(dAtA, i, uint64(len(m.MetaBinlog))) i-- dAtA[i] = 0x22 } if len(m.Progress) > 0 { i -= len(m.Progress) copy(dAtA[i:], m.Progress) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Progress))) i-- dAtA[i] = 0x1a } if m.TotalBytes != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.TotalBytes)) i-- dAtA[i] = 0x10 } if m.FinishedBytes != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.FinishedBytes)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *ShardingGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ShardingGroup) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ShardingGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Unsynced) > 0 { for iNdEx := len(m.Unsynced) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Unsynced[iNdEx]) copy(dAtA[i:], m.Unsynced[iNdEx]) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Unsynced[iNdEx]))) i-- dAtA[i] = 0x2a } } if len(m.Synced) > 0 { for iNdEx := len(m.Synced) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Synced[iNdEx]) 
copy(dAtA[i:], m.Synced[iNdEx]) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Synced[iNdEx]))) i-- dAtA[i] = 0x22 } } if len(m.FirstLocation) > 0 { i -= len(m.FirstLocation) copy(dAtA[i:], m.FirstLocation) i = encodeVarintDmworker(dAtA, i, uint64(len(m.FirstLocation))) i-- dAtA[i] = 0x1a } if len(m.DDLs) > 0 { for iNdEx := len(m.DDLs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.DDLs[iNdEx]) copy(dAtA[i:], m.DDLs[iNdEx]) i = encodeVarintDmworker(dAtA, i, uint64(len(m.DDLs[iNdEx]))) i-- dAtA[i] = 0x12 } } if len(m.Target) > 0 { i -= len(m.Target) copy(dAtA[i:], m.Target) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Target))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *SyncStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SyncStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SyncStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.DumpIOTotalBytes != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.DumpIOTotalBytes)) i-- dAtA[i] = 0x1 i-- dAtA[i] = 0x98 } if m.IoTotalBytes != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.IoTotalBytes)) i-- dAtA[i] = 0x1 i-- dAtA[i] = 0x90 } if m.RecentRps != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.RecentRps)) i-- dAtA[i] = 0x1 i-- dAtA[i] = 0x88 } if m.TotalRps != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.TotalRps)) i-- dAtA[i] = 0x1 i-- dAtA[i] = 0x80 } if m.TotalRows != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.TotalRows)) i-- dAtA[i] = 0x78 } if len(m.ConflictMsg) > 0 { i -= len(m.ConflictMsg) copy(dAtA[i:], m.ConflictMsg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ConflictMsg))) i-- dAtA[i] = 0x72 } if len(m.BlockDDLOwner) > 0 { i -= len(m.BlockDDLOwner) copy(dAtA[i:], m.BlockDDLOwner) i = encodeVarintDmworker(dAtA, i, 
uint64(len(m.BlockDDLOwner))) i-- dAtA[i] = 0x6a } if m.SecondsBehindMaster != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.SecondsBehindMaster)) i-- dAtA[i] = 0x60 } if len(m.BinlogType) > 0 { i -= len(m.BinlogType) copy(dAtA[i:], m.BinlogType) i = encodeVarintDmworker(dAtA, i, uint64(len(m.BinlogType))) i-- dAtA[i] = 0x5a } if m.Synced { i-- if m.Synced { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x50 } if len(m.UnresolvedGroups) > 0 { for iNdEx := len(m.UnresolvedGroups) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.UnresolvedGroups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x4a } } if len(m.BlockingDDLs) > 0 { for iNdEx := len(m.BlockingDDLs) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.BlockingDDLs[iNdEx]) copy(dAtA[i:], m.BlockingDDLs[iNdEx]) i = encodeVarintDmworker(dAtA, i, uint64(len(m.BlockingDDLs[iNdEx]))) i-- dAtA[i] = 0x42 } } if len(m.SyncerBinlogGtid) > 0 { i -= len(m.SyncerBinlogGtid) copy(dAtA[i:], m.SyncerBinlogGtid) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SyncerBinlogGtid))) i-- dAtA[i] = 0x3a } if len(m.SyncerBinlog) > 0 { i -= len(m.SyncerBinlog) copy(dAtA[i:], m.SyncerBinlog) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SyncerBinlog))) i-- dAtA[i] = 0x32 } if len(m.MasterBinlogGtid) > 0 { i -= len(m.MasterBinlogGtid) copy(dAtA[i:], m.MasterBinlogGtid) i = encodeVarintDmworker(dAtA, i, uint64(len(m.MasterBinlogGtid))) i-- dAtA[i] = 0x2a } if len(m.MasterBinlog) > 0 { i -= len(m.MasterBinlog) copy(dAtA[i:], m.MasterBinlog) i = encodeVarintDmworker(dAtA, i, uint64(len(m.MasterBinlog))) i-- dAtA[i] = 0x22 } if m.RecentTps != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.RecentTps)) i-- dAtA[i] = 0x18 } if m.TotalTps != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.TotalTps)) i-- dAtA[i] = 0x10 } if m.TotalEvents != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.TotalEvents)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m 
*SourceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SourceStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.RelayStatus != nil { { size, err := m.RelayStatus.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } if m.Result != nil { { size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } if len(m.Worker) > 0 { i -= len(m.Worker) copy(dAtA[i:], m.Worker) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Worker))) i-- dAtA[i] = 0x12 } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *RelayStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RelayStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RelayStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Result != nil { { size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x42 } if m.Stage != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Stage)) i-- dAtA[i] = 0x38 } if m.RelayCatchUpMaster { i-- if m.RelayCatchUpMaster { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x30 } if len(m.RelayBinlogGtid) > 0 { i -= 
len(m.RelayBinlogGtid) copy(dAtA[i:], m.RelayBinlogGtid) i = encodeVarintDmworker(dAtA, i, uint64(len(m.RelayBinlogGtid))) i-- dAtA[i] = 0x2a } if len(m.RelayBinlog) > 0 { i -= len(m.RelayBinlog) copy(dAtA[i:], m.RelayBinlog) i = encodeVarintDmworker(dAtA, i, uint64(len(m.RelayBinlog))) i-- dAtA[i] = 0x22 } if len(m.RelaySubDir) > 0 { i -= len(m.RelaySubDir) copy(dAtA[i:], m.RelaySubDir) i = encodeVarintDmworker(dAtA, i, uint64(len(m.RelaySubDir))) i-- dAtA[i] = 0x1a } if len(m.MasterBinlogGtid) > 0 { i -= len(m.MasterBinlogGtid) copy(dAtA[i:], m.MasterBinlogGtid) i = encodeVarintDmworker(dAtA, i, uint64(len(m.MasterBinlogGtid))) i-- dAtA[i] = 0x12 } if len(m.MasterBinlog) > 0 { i -= len(m.MasterBinlog) copy(dAtA[i:], m.MasterBinlog) i = encodeVarintDmworker(dAtA, i, uint64(len(m.MasterBinlog))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *SubTaskStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SubTaskStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Validation != nil { { size, err := m.Validation.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x5a } if m.Status != nil { { size := m.Status.Size() i -= size if _, err := m.Status.MarshalTo(dAtA[i:]); err != nil { return 0, err } } } if len(m.UnresolvedDDLLockID) > 0 { i -= len(m.UnresolvedDDLLockID) copy(dAtA[i:], m.UnresolvedDDLLockID) i = encodeVarintDmworker(dAtA, i, uint64(len(m.UnresolvedDDLLockID))) i-- dAtA[i] = 0x2a } if m.Result != nil { { size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- 
dAtA[i] = 0x22 } if m.Unit != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Unit)) i-- dAtA[i] = 0x18 } if m.Stage != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Stage)) i-- dAtA[i] = 0x10 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *SubTaskStatus_Msg) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskStatus_Msg) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x32 return len(dAtA) - i, nil } func (m *SubTaskStatus_Check) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskStatus_Check) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Check != nil { { size, err := m.Check.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x3a } return len(dAtA) - i, nil } func (m *SubTaskStatus_Dump) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskStatus_Dump) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Dump != nil { { size, err := m.Dump.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x42 } return len(dAtA) - i, nil } func (m *SubTaskStatus_Load) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskStatus_Load) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Load != nil { { size, err := m.Load.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x4a } 
return len(dAtA) - i, nil } func (m *SubTaskStatus_Sync) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskStatus_Sync) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Sync != nil { { size, err := m.Sync.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x52 } return len(dAtA) - i, nil } func (m *SubTaskStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SubTaskStatusList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Status) > 0 { for iNdEx := len(m.Status) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Status[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *CheckError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *CheckError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *DumpError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m 
*DumpError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *DumpError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *LoadError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LoadError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *LoadError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *SyncSQLError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SyncSQLError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SyncSQLError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.ErrorSQL) > 0 { i -= len(m.ErrorSQL) copy(dAtA[i:], m.ErrorSQL) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ErrorSQL))) i-- dAtA[i] = 0x1a } if len(m.FailedBinlogPosition) > 0 { i -= len(m.FailedBinlogPosition) copy(dAtA[i:], m.FailedBinlogPosition) i = encodeVarintDmworker(dAtA, i, uint64(len(m.FailedBinlogPosition))) i-- dAtA[i] = 0x12 } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *SyncError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SyncError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SyncError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Errors) > 0 { for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Errors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *SourceError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SourceError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SourceError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.RelayError != nil { { size, err := m.RelayError.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } if len(m.SourceError) > 0 { i -= len(m.SourceError) copy(dAtA[i:], m.SourceError) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SourceError))) i-- dAtA[i] = 0x1a } if len(m.Worker) > 0 { i -= len(m.Worker) copy(dAtA[i:], m.Worker) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Worker))) i-- dAtA[i] = 0x12 } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *RelayError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *RelayError) MarshalTo(dAtA []byte) (int, 
error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *RelayError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *SubTaskError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SubTaskError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Error != nil { { size := m.Error.Size() i -= size if _, err := m.Error.MarshalTo(dAtA[i:]); err != nil { return 0, err } } } if m.Unit != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Unit)) i-- dAtA[i] = 0x18 } if m.Stage != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Stage)) i-- dAtA[i] = 0x10 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *SubTaskError_Msg) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskError_Msg) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x22 return len(dAtA) - i, nil } func (m *SubTaskError_Check) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskError_Check) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Check != nil { { size, err := m.Check.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- 
dAtA[i] = 0x2a } return len(dAtA) - i, nil } func (m *SubTaskError_Dump) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskError_Dump) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Dump != nil { { size, err := m.Dump.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x32 } return len(dAtA) - i, nil } func (m *SubTaskError_Load) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskError_Load) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Load != nil { { size, err := m.Load.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x3a } return len(dAtA) - i, nil } func (m *SubTaskError_Sync) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskError_Sync) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) if m.Sync != nil { { size, err := m.Sync.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x42 } return len(dAtA) - i, nil } func (m *SubTaskErrorList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *SubTaskErrorList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *SubTaskErrorList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Error) > 0 { for iNdEx := len(m.Error) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Error[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = 
encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *ProcessResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ProcessResult) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ProcessResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Detail) > 0 { i -= len(m.Detail) copy(dAtA[i:], m.Detail) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Detail))) i-- dAtA[i] = 0x1a } if len(m.Errors) > 0 { for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Errors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } if m.IsCanceled { i-- if m.IsCanceled { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *ProcessError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ProcessError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ProcessError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Workaround) > 0 { i -= len(m.Workaround) copy(dAtA[i:], m.Workaround) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Workaround))) i-- dAtA[i] = 0x3a } if len(m.RawCause) > 0 { i -= len(m.RawCause) copy(dAtA[i:], m.RawCause) i = encodeVarintDmworker(dAtA, i, uint64(len(m.RawCause))) i-- dAtA[i] = 0x32 } if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x2a } if len(m.ErrLevel) > 0 { i -= 
len(m.ErrLevel) copy(dAtA[i:], m.ErrLevel) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ErrLevel))) i-- dAtA[i] = 0x22 } if len(m.ErrScope) > 0 { i -= len(m.ErrScope) copy(dAtA[i:], m.ErrScope) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ErrScope))) i-- dAtA[i] = 0x1a } if len(m.ErrClass) > 0 { i -= len(m.ErrClass) copy(dAtA[i:], m.ErrClass) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ErrClass))) i-- dAtA[i] = 0x12 } if m.ErrCode != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.ErrCode)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *PurgeRelayRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *PurgeRelayRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *PurgeRelayRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SubDir) > 0 { i -= len(m.SubDir) copy(dAtA[i:], m.SubDir) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SubDir))) i-- dAtA[i] = 0x22 } if len(m.Filename) > 0 { i -= len(m.Filename) copy(dAtA[i:], m.Filename) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Filename))) i-- dAtA[i] = 0x1a } if m.Time != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Time)) i-- dAtA[i] = 0x10 } if m.Inactive { i-- if m.Inactive { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateWorkerSchemaRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateWorkerSchemaRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateWorkerSchemaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if 
m.FromTarget { i-- if m.FromTarget { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x50 } if m.FromSource { i-- if m.FromSource { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x48 } if m.Sync { i-- if m.Sync { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x40 } if m.Flush { i-- if m.Flush { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x38 } if len(m.Schema) > 0 { i -= len(m.Schema) copy(dAtA[i:], m.Schema) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Schema))) i-- dAtA[i] = 0x32 } if len(m.Table) > 0 { i -= len(m.Table) copy(dAtA[i:], m.Table) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Table))) i-- dAtA[i] = 0x2a } if len(m.Database) > 0 { i -= len(m.Database) copy(dAtA[i:], m.Database) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Database))) i-- dAtA[i] = 0x22 } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0x1a } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0x12 } if m.Op != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *V1SubTaskMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *V1SubTaskMeta) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *V1SubTaskMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0x22 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x1a } if m.Stage != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Stage)) i-- dAtA[i] = 0x10 } if 
m.Op != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateV1MetaRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateV1MetaRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateV1MetaRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Op != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateV1MetaResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateV1MetaResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateV1MetaResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Meta) > 0 { for k := range m.Meta { v := m.Meta[k] baseI := i if v != nil { { size, err := v.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } i -= len(k) copy(dAtA[i:], k) i = encodeVarintDmworker(dAtA, i, uint64(len(k))) i-- dAtA[i] = 0xa i = encodeVarintDmworker(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *HandleWorkerErrorRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return 
nil, err } return dAtA[:n], nil } func (m *HandleWorkerErrorRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *HandleWorkerErrorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Sqls) > 0 { for iNdEx := len(m.Sqls) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Sqls[iNdEx]) copy(dAtA[i:], m.Sqls[iNdEx]) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Sqls[iNdEx]))) i-- dAtA[i] = 0x22 } } if len(m.BinlogPos) > 0 { i -= len(m.BinlogPos) copy(dAtA[i:], m.BinlogPos) i = encodeVarintDmworker(dAtA, i, uint64(len(m.BinlogPos))) i-- dAtA[i] = 0x1a } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0x12 } if m.Op != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *GetWorkerCfgRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetWorkerCfgRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetWorkerCfgRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l return len(dAtA) - i, nil } func (m *GetWorkerCfgResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetWorkerCfgResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetWorkerCfgResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Cfg) > 0 { i -= len(m.Cfg) copy(dAtA[i:], m.Cfg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Cfg))) i-- dAtA[i] = 0xa } return 
len(dAtA) - i, nil } func (m *CheckSubtasksCanUpdateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckSubtasksCanUpdateRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *CheckSubtasksCanUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SubtaskCfgTomlString) > 0 { i -= len(m.SubtaskCfgTomlString) copy(dAtA[i:], m.SubtaskCfgTomlString) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SubtaskCfgTomlString))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *CheckSubtasksCanUpdateResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *CheckSubtasksCanUpdateResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *CheckSubtasksCanUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Success { i-- if m.Success { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *GetValidationStatusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetValidationStatusRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetValidationStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.FilterStatus != 0 { i = 
encodeVarintDmworker(dAtA, i, uint64(m.FilterStatus)) i-- dAtA[i] = 0x10 } if len(m.TaskName) > 0 { i -= len(m.TaskName) copy(dAtA[i:], m.TaskName) i = encodeVarintDmworker(dAtA, i, uint64(len(m.TaskName))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *ValidationStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ValidationStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ValidationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.CutoverBinlogGtid) > 0 { i -= len(m.CutoverBinlogGtid) copy(dAtA[i:], m.CutoverBinlogGtid) i = encodeVarintDmworker(dAtA, i, uint64(len(m.CutoverBinlogGtid))) i-- dAtA[i] = 0x62 } if len(m.CutoverBinlogPos) > 0 { i -= len(m.CutoverBinlogPos) copy(dAtA[i:], m.CutoverBinlogPos) i = encodeVarintDmworker(dAtA, i, uint64(len(m.CutoverBinlogPos))) i-- dAtA[i] = 0x5a } if len(m.ErrorRowsStatus) > 0 { i -= len(m.ErrorRowsStatus) copy(dAtA[i:], m.ErrorRowsStatus) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ErrorRowsStatus))) i-- dAtA[i] = 0x52 } if len(m.PendingRowsStatus) > 0 { i -= len(m.PendingRowsStatus) copy(dAtA[i:], m.PendingRowsStatus) i = encodeVarintDmworker(dAtA, i, uint64(len(m.PendingRowsStatus))) i-- dAtA[i] = 0x4a } if len(m.ProcessedRowsStatus) > 0 { i -= len(m.ProcessedRowsStatus) copy(dAtA[i:], m.ProcessedRowsStatus) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ProcessedRowsStatus))) i-- dAtA[i] = 0x42 } if m.Result != nil { { size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x3a } if len(m.ValidatorBinlogGtid) > 0 { i -= len(m.ValidatorBinlogGtid) copy(dAtA[i:], m.ValidatorBinlogGtid) i = encodeVarintDmworker(dAtA, i, 
uint64(len(m.ValidatorBinlogGtid))) i-- dAtA[i] = 0x32 } if len(m.ValidatorBinlog) > 0 { i -= len(m.ValidatorBinlog) copy(dAtA[i:], m.ValidatorBinlog) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ValidatorBinlog))) i-- dAtA[i] = 0x2a } if m.Stage != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Stage)) i-- dAtA[i] = 0x20 } if len(m.Mode) > 0 { i -= len(m.Mode) copy(dAtA[i:], m.Mode) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Mode))) i-- dAtA[i] = 0x1a } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0x12 } if len(m.Task) > 0 { i -= len(m.Task) copy(dAtA[i:], m.Task) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Task))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *ValidationTableStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ValidationTableStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ValidationTableStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x2a } if m.Stage != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Stage)) i-- dAtA[i] = 0x20 } if len(m.DstTable) > 0 { i -= len(m.DstTable) copy(dAtA[i:], m.DstTable) i = encodeVarintDmworker(dAtA, i, uint64(len(m.DstTable))) i-- dAtA[i] = 0x1a } if len(m.SrcTable) > 0 { i -= len(m.SrcTable) copy(dAtA[i:], m.SrcTable) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SrcTable))) i-- dAtA[i] = 0x12 } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *GetValidationStatusResponse) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetValidationStatusResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetValidationStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.TableStatuses) > 0 { for iNdEx := len(m.TableStatuses) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.TableStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } if len(m.Validators) > 0 { for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *GetValidationErrorRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetValidationErrorRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetValidationErrorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.TaskName) > 0 { i -= len(m.TaskName) copy(dAtA[i:], m.TaskName) i = encodeVarintDmworker(dAtA, i, uint64(len(m.TaskName))) i-- dAtA[i] = 0x12 } if m.ErrState != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.ErrState)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m 
*ValidationError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ValidationError) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ValidationError) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x52 } if len(m.Time) > 0 { i -= len(m.Time) copy(dAtA[i:], m.Time) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Time))) i-- dAtA[i] = 0x4a } if m.Status != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Status)) i-- dAtA[i] = 0x40 } if len(m.ErrorType) > 0 { i -= len(m.ErrorType) copy(dAtA[i:], m.ErrorType) i = encodeVarintDmworker(dAtA, i, uint64(len(m.ErrorType))) i-- dAtA[i] = 0x3a } if len(m.DstData) > 0 { i -= len(m.DstData) copy(dAtA[i:], m.DstData) i = encodeVarintDmworker(dAtA, i, uint64(len(m.DstData))) i-- dAtA[i] = 0x32 } if len(m.DstTable) > 0 { i -= len(m.DstTable) copy(dAtA[i:], m.DstTable) i = encodeVarintDmworker(dAtA, i, uint64(len(m.DstTable))) i-- dAtA[i] = 0x2a } if len(m.SrcData) > 0 { i -= len(m.SrcData) copy(dAtA[i:], m.SrcData) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SrcData))) i-- dAtA[i] = 0x22 } if len(m.SrcTable) > 0 { i -= len(m.SrcTable) copy(dAtA[i:], m.SrcTable) i = encodeVarintDmworker(dAtA, i, uint64(len(m.SrcTable))) i-- dAtA[i] = 0x1a } if len(m.Source) > 0 { i -= len(m.Source) copy(dAtA[i:], m.Source) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Source))) i-- dAtA[i] = 0x12 } if len(m.Id) > 0 { i -= len(m.Id) copy(dAtA[i:], m.Id) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Id))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func (m *GetValidationErrorResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *GetValidationErrorResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *GetValidationErrorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Error) > 0 { for iNdEx := len(m.Error) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Error[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintDmworker(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateValidationErrorRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *OperateValidationErrorRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateValidationErrorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.ErrId != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.ErrId)) i-- dAtA[i] = 0x20 } if len(m.TaskName) > 0 { i -= len(m.TaskName) copy(dAtA[i:], m.TaskName) i = encodeVarintDmworker(dAtA, i, uint64(len(m.TaskName))) i-- dAtA[i] = 0x1a } if m.IsAllError { i-- if m.IsAllError { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x10 } if m.Op != 0 { i = encodeVarintDmworker(dAtA, i, uint64(m.Op)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *OperateValidationErrorResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } 
return dAtA[:n], nil } func (m *OperateValidationErrorResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *OperateValidationErrorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.Msg) > 0 { i -= len(m.Msg) copy(dAtA[i:], m.Msg) i = encodeVarintDmworker(dAtA, i, uint64(len(m.Msg))) i-- dAtA[i] = 0x12 } if m.Result { i-- if m.Result { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } func (m *UpdateValidationWorkerRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *UpdateValidationWorkerRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *UpdateValidationWorkerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.BinlogGTID) > 0 { i -= len(m.BinlogGTID) copy(dAtA[i:], m.BinlogGTID) i = encodeVarintDmworker(dAtA, i, uint64(len(m.BinlogGTID))) i-- dAtA[i] = 0x1a } if len(m.BinlogPos) > 0 { i -= len(m.BinlogPos) copy(dAtA[i:], m.BinlogPos) i = encodeVarintDmworker(dAtA, i, uint64(len(m.BinlogPos))) i-- dAtA[i] = 0x12 } if len(m.TaskName) > 0 { i -= len(m.TaskName) copy(dAtA[i:], m.TaskName) i = encodeVarintDmworker(dAtA, i, uint64(len(m.TaskName))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } func encodeVarintDmworker(dAtA []byte, offset int, v uint64) int { offset -= sovDmworker(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *QueryStatusRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *CommonWorkerResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if 
m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Source) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Worker) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *QueryStatusResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.SourceStatus != nil { l = m.SourceStatus.Size() n += 1 + l + sovDmworker(uint64(l)) } if len(m.SubTaskStatus) > 0 { for _, e := range m.SubTaskStatus { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *CheckStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Passed { n += 2 } if m.Total != 0 { n += 1 + sovDmworker(uint64(m.Total)) } if m.Successful != 0 { n += 1 + sovDmworker(uint64(m.Successful)) } if m.Failed != 0 { n += 1 + sovDmworker(uint64(m.Failed)) } if m.Warning != 0 { n += 1 + sovDmworker(uint64(m.Warning)) } l = len(m.Detail) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *DumpStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.TotalTables != 0 { n += 1 + sovDmworker(uint64(m.TotalTables)) } if m.CompletedTables != 0 { n += 9 } if m.FinishedBytes != 0 { n += 9 } if m.FinishedRows != 0 { n += 9 } if m.EstimateTotalRows != 0 { n += 9 } if m.Bps != 0 { n += 1 + sovDmworker(uint64(m.Bps)) } l = len(m.Progress) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *LoadStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.FinishedBytes != 0 { n += 1 + sovDmworker(uint64(m.FinishedBytes)) } if m.TotalBytes != 0 { n += 1 + sovDmworker(uint64(m.TotalBytes)) } l = len(m.Progress) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.MetaBinlog) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.MetaBinlogGTID) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Bps != 0 { n += 1 + sovDmworker(uint64(m.Bps)) } return n } func (m 
*ShardingGroup) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Target) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if len(m.DDLs) > 0 { for _, s := range m.DDLs { l = len(s) n += 1 + l + sovDmworker(uint64(l)) } } l = len(m.FirstLocation) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if len(m.Synced) > 0 { for _, s := range m.Synced { l = len(s) n += 1 + l + sovDmworker(uint64(l)) } } if len(m.Unsynced) > 0 { for _, s := range m.Unsynced { l = len(s) n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *SyncStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.TotalEvents != 0 { n += 1 + sovDmworker(uint64(m.TotalEvents)) } if m.TotalTps != 0 { n += 1 + sovDmworker(uint64(m.TotalTps)) } if m.RecentTps != 0 { n += 1 + sovDmworker(uint64(m.RecentTps)) } l = len(m.MasterBinlog) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.MasterBinlogGtid) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.SyncerBinlog) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.SyncerBinlogGtid) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if len(m.BlockingDDLs) > 0 { for _, s := range m.BlockingDDLs { l = len(s) n += 1 + l + sovDmworker(uint64(l)) } } if len(m.UnresolvedGroups) > 0 { for _, e := range m.UnresolvedGroups { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } if m.Synced { n += 2 } l = len(m.BinlogType) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.SecondsBehindMaster != 0 { n += 1 + sovDmworker(uint64(m.SecondsBehindMaster)) } l = len(m.BlockDDLOwner) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ConflictMsg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.TotalRows != 0 { n += 1 + sovDmworker(uint64(m.TotalRows)) } if m.TotalRps != 0 { n += 2 + sovDmworker(uint64(m.TotalRps)) } if m.RecentRps != 0 { n += 2 + sovDmworker(uint64(m.RecentRps)) } if m.IoTotalBytes != 0 { n += 2 + sovDmworker(uint64(m.IoTotalBytes)) } if m.DumpIOTotalBytes != 0 { n += 2 + 
sovDmworker(uint64(m.DumpIOTotalBytes)) } return n } func (m *SourceStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Source) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Worker) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Result != nil { l = m.Result.Size() n += 1 + l + sovDmworker(uint64(l)) } if m.RelayStatus != nil { l = m.RelayStatus.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *RelayStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.MasterBinlog) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.MasterBinlogGtid) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.RelaySubDir) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.RelayBinlog) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.RelayBinlogGtid) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.RelayCatchUpMaster { n += 2 } if m.Stage != 0 { n += 1 + sovDmworker(uint64(m.Stage)) } if m.Result != nil { l = m.Result.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Stage != 0 { n += 1 + sovDmworker(uint64(m.Stage)) } if m.Unit != 0 { n += 1 + sovDmworker(uint64(m.Unit)) } if m.Result != nil { l = m.Result.Size() n += 1 + l + sovDmworker(uint64(l)) } l = len(m.UnresolvedDDLLockID) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Status != nil { n += m.Status.Size() } if m.Validation != nil { l = m.Validation.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskStatus_Msg) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) n += 1 + l + sovDmworker(uint64(l)) return n } func (m *SubTaskStatus_Check) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Check != nil { l = m.Check.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskStatus_Dump) 
Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Dump != nil { l = m.Dump.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskStatus_Load) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Load != nil { l = m.Load.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskStatus_Sync) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Sync != nil { l = m.Sync.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskStatusList) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Status) > 0 { for _, e := range m.Status { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *CheckError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *DumpError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *LoadError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SyncSQLError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.FailedBinlogPosition) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ErrorSQL) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SyncError) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Errors) > 0 { for _, e := range m.Errors { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *SourceError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Source) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Worker) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.SourceError) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.RelayError != nil { l = m.RelayError.Size() n += 1 + l + 
sovDmworker(uint64(l)) } return n } func (m *RelayError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Stage != 0 { n += 1 + sovDmworker(uint64(m.Stage)) } if m.Unit != 0 { n += 1 + sovDmworker(uint64(m.Unit)) } if m.Error != nil { n += m.Error.Size() } return n } func (m *SubTaskError_Msg) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Msg) n += 1 + l + sovDmworker(uint64(l)) return n } func (m *SubTaskError_Check) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Check != nil { l = m.Check.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskError_Dump) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Dump != nil { l = m.Dump.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskError_Load) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Load != nil { l = m.Load.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskError_Sync) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Sync != nil { l = m.Sync.Size() n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *SubTaskErrorList) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.Error) > 0 { for _, e := range m.Error { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *ProcessResult) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.IsCanceled { n += 2 } if len(m.Errors) > 0 { for _, e := range m.Errors { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } l = len(m.Detail) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *ProcessError) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.ErrCode != 0 { n += 1 + sovDmworker(uint64(m.ErrCode)) } l = len(m.ErrClass) if l 
> 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ErrScope) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ErrLevel) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Message) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.RawCause) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Workaround) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *PurgeRelayRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Inactive { n += 2 } if m.Time != 0 { n += 1 + sovDmworker(uint64(m.Time)) } l = len(m.Filename) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.SubDir) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *OperateWorkerSchemaRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmworker(uint64(m.Op)) } l = len(m.Task) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Source) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Database) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Table) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Schema) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Flush { n += 2 } if m.Sync { n += 2 } if m.FromSource { n += 2 } if m.FromTarget { n += 2 } return n } func (m *V1SubTaskMeta) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmworker(uint64(m.Op)) } if m.Stage != 0 { n += 1 + sovDmworker(uint64(m.Stage)) } l = len(m.Name) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Task) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *OperateV1MetaRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmworker(uint64(m.Op)) } return n } func (m *OperateV1MetaResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if len(m.Meta) > 0 { for k, v := 
range m.Meta { _ = k _ = v l = 0 if v != nil { l = v.Size() l += 1 + sovDmworker(uint64(l)) } mapEntrySize := 1 + len(k) + sovDmworker(uint64(len(k))) + l n += mapEntrySize + 1 + sovDmworker(uint64(mapEntrySize)) } } return n } func (m *HandleWorkerErrorRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmworker(uint64(m.Op)) } l = len(m.Task) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.BinlogPos) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if len(m.Sqls) > 0 { for _, s := range m.Sqls { l = len(s) n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *GetWorkerCfgRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l return n } func (m *GetWorkerCfgResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Cfg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *CheckSubtasksCanUpdateRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.SubtaskCfgTomlString) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *CheckSubtasksCanUpdateResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Success { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *GetValidationStatusRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.TaskName) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.FilterStatus != 0 { n += 1 + sovDmworker(uint64(m.FilterStatus)) } return n } func (m *ValidationStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Task) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Source) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Mode) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Stage != 0 { n += 1 + sovDmworker(uint64(m.Stage)) } l = len(m.ValidatorBinlog) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ValidatorBinlogGtid) if l > 0 { n += 1 + l + 
sovDmworker(uint64(l)) } if m.Result != nil { l = m.Result.Size() n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ProcessedRowsStatus) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.PendingRowsStatus) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ErrorRowsStatus) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.CutoverBinlogPos) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.CutoverBinlogGtid) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *ValidationTableStatus) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Source) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.SrcTable) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.DstTable) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Stage != 0 { n += 1 + sovDmworker(uint64(m.Stage)) } l = len(m.Message) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *GetValidationStatusResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if len(m.Validators) > 0 { for _, e := range m.Validators { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } if len(m.TableStatuses) > 0 { for _, e := range m.TableStatuses { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *GetValidationErrorRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.ErrState != 0 { n += 1 + sovDmworker(uint64(m.ErrState)) } l = len(m.TaskName) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *ValidationError) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Id) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Source) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.SrcTable) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.SrcData) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.DstTable) if l > 0 { n += 1 + l + 
sovDmworker(uint64(l)) } l = len(m.DstData) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.ErrorType) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.Status != 0 { n += 1 + sovDmworker(uint64(m.Status)) } l = len(m.Time) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.Message) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *GetValidationErrorResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if len(m.Error) > 0 { for _, e := range m.Error { l = e.Size() n += 1 + l + sovDmworker(uint64(l)) } } return n } func (m *OperateValidationErrorRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Op != 0 { n += 1 + sovDmworker(uint64(m.Op)) } if m.IsAllError { n += 2 } l = len(m.TaskName) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } if m.ErrId != 0 { n += 1 + sovDmworker(uint64(m.ErrId)) } return n } func (m *OperateValidationErrorResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.Result { n += 2 } l = len(m.Msg) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func (m *UpdateValidationWorkerRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.TaskName) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.BinlogPos) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } l = len(m.BinlogGTID) if l > 0 { n += 1 + l + sovDmworker(uint64(l)) } return n } func sovDmworker(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozDmworker(x uint64) (n int) { return sovDmworker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *QueryStatusRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift 
if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: QueryStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: QueryStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CommonWorkerResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CommonWorkerResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CommonWorkerResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int 
for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex 
> l { return io.ErrUnexpectedEOF } m.Worker = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *QueryStatusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: QueryStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: QueryStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SourceStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.SourceStatus == nil { m.SourceStatus = &SourceStatus{} } if err := m.SourceStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SubTaskStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SubTaskStatus = append(m.SubTaskStatus, &SubTaskStatus{}) if err := m.SubTaskStatus[len(m.SubTaskStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Passed", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Passed = bool(v != 0) case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) } m.Total = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Total |= int32(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Successful", wireType) } m.Successful = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Successful |= int32(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) } m.Failed = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Failed |= int32(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Warning", wireType) } m.Warning = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Warning |= int32(b&0x7F) << shift if b < 0x80 { 
break } } case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Detail = append(m.Detail[:0], dAtA[iNdEx:postIndex]...) if m.Detail == nil { m.Detail = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DumpStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DumpStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DumpStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalTables", wireType) } m.TotalTables = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TotalTables |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field 
CompletedTables", wireType) } var v uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.CompletedTables = float64(math.Float64frombits(v)) case 3: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field FinishedBytes", wireType) } var v uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.FinishedBytes = float64(math.Float64frombits(v)) case 4: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field FinishedRows", wireType) } var v uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.FinishedRows = float64(math.Float64frombits(v)) case 5: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field EstimateTotalRows", wireType) } var v uint64 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.EstimateTotalRows = float64(math.Float64frombits(v)) case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Bps", wireType) } m.Bps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Bps |= int64(b&0x7F) << shift if b < 0x80 { break } } case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return 
io.ErrUnexpectedEOF } m.Progress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *LoadStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LoadStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LoadStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FinishedBytes", wireType) } m.FinishedBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.FinishedBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalBytes", wireType) } m.TotalBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TotalBytes |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Progress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MetaBinlog", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.MetaBinlog = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MetaBinlogGTID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.MetaBinlogGTID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Bps", wireType) } m.Bps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Bps |= int64(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err 
!= nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ShardingGroup) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ShardingGroup: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ShardingGroup: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Target = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DDLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if 
postIndex > l { return io.ErrUnexpectedEOF } m.DDLs = append(m.DDLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field FirstLocation", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.FirstLocation = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Synced", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Synced = append(m.Synced, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Unsynced", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Unsynced = append(m.Unsynced, 
string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SyncStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SyncStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SyncStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalEvents", wireType) } m.TotalEvents = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TotalEvents |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalTps", wireType) } m.TotalTps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TotalTps |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RecentTps", wireType) } m.RecentTps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.RecentTps |= int64(b&0x7F) << shift if b < 0x80 { break } } case 
4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MasterBinlog", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.MasterBinlog = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MasterBinlogGtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.MasterBinlogGtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SyncerBinlog", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SyncerBinlog = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field 
SyncerBinlogGtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SyncerBinlogGtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BlockingDDLs", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.BlockingDDLs = append(m.BlockingDDLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field UnresolvedGroups", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.UnresolvedGroups = append(m.UnresolvedGroups, &ShardingGroup{}) if err := m.UnresolvedGroups[len(m.UnresolvedGroups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 10: if wireType != 0 { return fmt.Errorf("proto: wrong 
wireType = %d for field Synced", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Synced = bool(v != 0) case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BinlogType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.BinlogType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field SecondsBehindMaster", wireType) } m.SecondsBehindMaster = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.SecondsBehindMaster |= int64(b&0x7F) << shift if b < 0x80 { break } } case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BlockDDLOwner", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.BlockDDLOwner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 14: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = 
%d for field ConflictMsg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ConflictMsg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 15: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalRows", wireType) } m.TotalRows = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TotalRows |= int64(b&0x7F) << shift if b < 0x80 { break } } case 16: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalRps", wireType) } m.TotalRps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.TotalRps |= int64(b&0x7F) << shift if b < 0x80 { break } } case 17: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RecentRps", wireType) } m.RecentRps = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.RecentRps |= int64(b&0x7F) << shift if b < 0x80 { break } } case 18: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IoTotalBytes", wireType) } m.IoTotalBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.IoTotalBytes |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 19: if wireType != 0 { return fmt.Errorf("proto: wrong 
wireType = %d for field DumpIOTotalBytes", wireType) } m.DumpIOTotalBytes = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.DumpIOTotalBytes |= uint64(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SourceStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SourceStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) } var stringLen uint64 for shift := uint(0); 
; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Worker = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.Result == nil { m.Result = &ProcessResult{} } if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RelayStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.RelayStatus == nil { m.RelayStatus = &RelayStatus{} } if err := m.RelayStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) 
> l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RelayStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RelayStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RelayStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MasterBinlog", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.MasterBinlog = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MasterBinlogGtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.MasterBinlogGtid = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RelaySubDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.RelaySubDir = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RelayBinlog", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.RelayBinlog = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RelayBinlogGtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.RelayBinlogGtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field 
RelayCatchUpMaster", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.RelayCatchUpMaster = bool(v != 0) case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) } m.Stage = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Stage |= Stage(b&0x7F) << shift if b < 0x80 { break } } case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.Result == nil { m.Result = &ProcessResult{} } if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SubTaskStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return 
fmt.Errorf("proto: SubTaskStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SubTaskStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) } m.Stage = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Stage |= Stage(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) } m.Unit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Unit |= UnitType(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.Result == 
nil { m.Result = &ProcessResult{} } if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field UnresolvedDDLLockID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.UnresolvedDDLLockID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Status = &SubTaskStatus_Msg{string(dAtA[iNdEx:postIndex])} iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Check", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &CheckStatus{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { return err } m.Status = &SubTaskStatus_Check{v} iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Dump", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &DumpStatus{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Status = &SubTaskStatus_Dump{v} iNdEx = postIndex case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Load", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &LoadStatus{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Status = &SubTaskStatus_Load{v} iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &SyncStatus{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Status = 
&SubTaskStatus_Sync{v} iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Validation", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.Validation == nil { m.Validation = &ValidationStatus{} } if err := m.Validation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SubTaskStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SubTaskStatusList: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SubTaskStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 
0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Status = append(m.Status, &SubTaskStatus{}) if err := m.Status[len(m.Status)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { 
return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *DumpError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: DumpError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: DumpError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *LoadError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LoadError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LoadError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SyncSQLError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SyncSQLError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SyncSQLError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field FailedBinlogPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.FailedBinlogPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ErrorSQL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ErrorSQL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += 
skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SyncError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SyncError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SyncError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Errors = append(m.Errors, &SyncSQLError{}) if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SourceError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SourceError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SourceError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Worker = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SourceError", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if 
postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SourceError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RelayError", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.RelayError == nil { m.RelayError = &RelayError{} } if err := m.RelayError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *RelayError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: RelayError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: RelayError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SubTaskError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SubTaskError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SubTaskError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = 
%d for field Stage", wireType) } m.Stage = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Stage |= Stage(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) } m.Unit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Unit |= UnitType(b&0x7F) << shift if b < 0x80 { break } } case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Error = &SubTaskError_Msg{string(dAtA[iNdEx:postIndex])} iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Check", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &CheckError{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Error = &SubTaskError_Check{v} iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Dump", wireType) } var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &DumpError{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Error = &SubTaskError_Dump{v} iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Load", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &LoadError{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Error = &SubTaskError_Load{v} iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } v := &SyncError{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } m.Error = &SubTaskError_Sync{v} iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *SubTaskErrorList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: SubTaskErrorList: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: SubTaskErrorList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Error = append(m.Error, &SubTaskError{}) if err := m.Error[len(m.Error)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ProcessResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := 
dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ProcessResult: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ProcessResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IsCanceled", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.IsCanceled = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Errors = append(m.Errors, &ProcessError{}) if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Detail = append(m.Detail[:0], dAtA[iNdEx:postIndex]...) 
if m.Detail == nil { m.Detail = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ProcessError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ProcessError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ProcessError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ErrCode", wireType) } m.ErrCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ErrCode |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ErrClass", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ErrClass = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return 
fmt.Errorf("proto: wrong wireType = %d for field ErrScope", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ErrScope = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ErrLevel", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ErrLevel = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RawCause", wireType) } var stringLen uint64 for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.RawCause = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Workaround", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Workaround = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *PurgeRelayRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: PurgeRelayRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: PurgeRelayRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Inactive", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Inactive = bool(v != 0) case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } m.Time = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Time |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Filename = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SubDir", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SubDir = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex 
skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateWorkerSchemaRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateWorkerSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateWorkerSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= SchemaOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var 
stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Database = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Table = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Schema = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Flush = bool(v != 0) case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Sync = bool(v != 0) case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FromSource", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.FromSource = bool(v != 0) case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FromTarget", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.FromTarget = bool(v != 0) default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF 
} iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *V1SubTaskMeta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: V1SubTaskMeta: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: V1SubTaskMeta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= TaskOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) } m.Stage = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Stage |= Stage(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { 
return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = append(m.Task[:0], dAtA[iNdEx:postIndex]...) if m.Task == nil { m.Task = []byte{} } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateV1MetaRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateV1MetaRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateV1MetaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= V1MetaOp(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 
0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateV1MetaResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateV1MetaResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateV1MetaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { 
return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.Meta == nil { m.Meta = make(map[string]*V1SubTaskMeta) } var mapkey string var mapvalue *V1SubTaskMeta for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLenmapkey |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLenmapkey := int(stringLenmapkey) if intStringLenmapkey < 0 { return ErrInvalidLengthDmworker } postStringIndexmapkey := iNdEx + intStringLenmapkey if postStringIndexmapkey < 0 { return ErrInvalidLengthDmworker } if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } } if mapmsglen < 0 { return ErrInvalidLengthDmworker } postmsgIndex := iNdEx + mapmsglen if postmsgIndex < 0 { return ErrInvalidLengthDmworker } if postmsgIndex > l { return io.ErrUnexpectedEOF } mapvalue = &V1SubTaskMeta{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != 
nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF } iNdEx += skippy } } m.Meta[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *HandleWorkerErrorRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: HandleWorkerErrorRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: HandleWorkerErrorRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= ErrorOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BinlogPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.BinlogPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Sqls", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Sqls = append(m.Sqls, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetWorkerCfgRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetWorkerCfgRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetWorkerCfgRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetWorkerCfgResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetWorkerCfgResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetWorkerCfgResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cfg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Cfg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return 
err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckSubtasksCanUpdateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckSubtasksCanUpdateRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckSubtasksCanUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SubtaskCfgTomlString", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SubtaskCfgTomlString = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *CheckSubtasksCanUpdateResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: CheckSubtasksCanUpdateResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: CheckSubtasksCanUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Success = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetValidationStatusRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetValidationStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetValidationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.TaskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FilterStatus", wireType) } m.FilterStatus = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.FilterStatus |= Stage(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ValidationStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ValidationStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ValidationStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Task = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := 
int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Mode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) } m.Stage = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Stage |= Stage(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ValidatorBinlog", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ValidatorBinlog = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ValidatorBinlogGtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ValidatorBinlogGtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen 
int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } if m.Result == nil { m.Result = &ProcessResult{} } if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ProcessedRowsStatus", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ProcessedRowsStatus = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PendingRowsStatus", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.PendingRowsStatus = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ErrorRowsStatus", wireType) } var stringLen uint64 for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ErrorRowsStatus = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CutoverBinlogPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.CutoverBinlogPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 12: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CutoverBinlogGtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.CutoverBinlogGtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ValidationTableStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ValidationTableStatus: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ValidationTableStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SrcTable", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SrcTable = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 
2 { return fmt.Errorf("proto: wrong wireType = %d for field DstTable", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.DstTable = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) } m.Stage = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Stage |= Stage(b&0x7F) << shift if b < 0x80 { break } } case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetValidationStatusResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire 
uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetValidationStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetValidationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return 
io.ErrUnexpectedEOF } m.Validators = append(m.Validators, &ValidationStatus{}) if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TableStatuses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.TableStatuses = append(m.TableStatuses, &ValidationTableStatus{}) if err := m.TableStatuses[len(m.TableStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetValidationErrorRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetValidationErrorRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetValidationErrorRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ErrState", wireType) } m.ErrState = 0 
for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ErrState |= ValidateErrorState(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.TaskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ValidationError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ValidationError: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ValidationError: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } 
if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Id = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SrcTable", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SrcTable = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SrcData", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } 
} intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.SrcData = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DstTable", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.DstTable = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DstData", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.DstData = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ErrorType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + 
intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.ErrorType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Status |= ValidateErrorState(b&0x7F) << shift if b < 0x80 { break } } case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Time = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx 
+= skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *GetValidationErrorResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: GetValidationErrorResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: GetValidationErrorResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << 
shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Error = append(m.Error, &ValidationError{}) if err := m.Error[len(m.Error)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateValidationErrorRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateValidationErrorRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateValidationErrorRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) } m.Op = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Op |= ValidationErrOp(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IsAllError", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } 
m.IsAllError = bool(v != 0) case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.TaskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ErrId", wireType) } m.ErrId = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.ErrId |= uint64(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *OperateValidationErrorResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: OperateValidationErrorResponse: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: OperateValidationErrorResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return 
fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ v |= int(b&0x7F) << shift if b < 0x80 { break } } m.Result = bool(v != 0) case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.Msg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *UpdateValidationWorkerRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: UpdateValidationWorkerRequest: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: UpdateValidationWorkerRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TaskName", 
wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.TaskName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BinlogPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.BinlogPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field BinlogGTID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowDmworker } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthDmworker } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthDmworker } if postIndex > l { return io.ErrUnexpectedEOF } m.BinlogGTID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipDmworker(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthDmworker } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipDmworker(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowDmworker } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowDmworker } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowDmworker } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthDmworker } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupDmworker } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthDmworker } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthDmworker = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowDmworker = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupDmworker = fmt.Errorf("proto: unexpected end of group") ) tiup-1.16.3/pkg/cluster/api/error.go000066400000000000000000000021171505422223000172520ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package api import "fmt" var ( // ErrNoStore is an empty NoStoreErr object, useful for type checking ErrNoStore = &NoStoreErr{} ) // NoStoreErr is the error that no store matching address can be found in PD type NoStoreErr struct { addr string } // Error implement the error interface func (e *NoStoreErr) Error() string { return fmt.Sprintf("no store matching address \"%s\" found", e.addr) } // Is implements the error interface func (e *NoStoreErr) Is(target error) bool { t, ok := target.(*NoStoreErr) if !ok { return false } return e.addr == t.addr || t.addr == "" } tiup-1.16.3/pkg/cluster/api/error_test.go000066400000000000000000000026021505422223000203100ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package api import ( "errors" "testing" "github.com/stretchr/testify/require" ) func TestNoStoreErrIs(t *testing.T) { err0 := &NoStoreErr{ addr: "1.2.3.4", } // identical errors are equal require.True(t, errors.Is(err0, err0)) require.True(t, errors.Is(ErrNoStore, ErrNoStore)) require.True(t, errors.Is(ErrNoStore, &NoStoreErr{})) require.True(t, errors.Is(&NoStoreErr{}, ErrNoStore)) // not equal for different error types require.False(t, errors.Is(err0, errors.New(""))) // default Value matches any error require.True(t, errors.Is(err0, ErrNoStore)) // error with values are not matching default ones require.False(t, errors.Is(ErrNoStore, err0)) err1 := &NoStoreErr{ addr: "2.3.4.5", } require.True(t, errors.Is(err1, ErrNoStore)) // errors with different values are not equal require.False(t, errors.Is(err0, err1)) require.False(t, errors.Is(err1, err0)) } tiup-1.16.3/pkg/cluster/api/pdapi.go000066400000000000000000001034301505422223000172160ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package api import ( "bytes" "context" "crypto/tls" "encoding/json" "errors" "fmt" "io" "math" "net/http" "net/url" "strconv" "strings" "time" "github.com/jeremywohl/flatten" perrs "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/mod/semver" ) // PDClient is an HTTP client of the PD server type PDClient struct { version string addrs []string tlsEnabled bool httpClient *utils.HTTPClient ctx context.Context } // LabelInfo represents an instance label info type LabelInfo struct { Machine string `json:"machine"` Port string `json:"port"` Store uint64 `json:"store"` Status string `json:"status"` Leaders int `json:"leaders"` Regions int `json:"regions"` Capacity string `json:"capacity"` Available string `json:"available"` Labels string `json:"labels"` } // NewPDClient returns a new PDClient, the context must have // a *logprinter.Logger as value of "logger" func NewPDClient( ctx context.Context, addrs []string, timeout time.Duration, tlsConfig *tls.Config, ) *PDClient { enableTLS := false if tlsConfig != nil { enableTLS = true } if _, ok := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger); !ok { panic("the context must have logger inside") } cli := &PDClient{ addrs: addrs, tlsEnabled: enableTLS, httpClient: utils.NewHTTPClient(timeout, tlsConfig), ctx: ctx, } cli.tryIdentifyVersion() return cli } func (pc *PDClient) l() *logprinter.Logger { return pc.ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) } func (pc *PDClient) tryIdentifyVersion() { endpoints := pc.getEndpoints(pdVersionURI) response := map[string]string{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &response) }) if err == nil { pc.version = response["version"] } } // GetURL builds the 
client URL of PDClient func (pc *PDClient) GetURL(addr string) string { httpPrefix := "http" if pc.tlsEnabled { httpPrefix = "https" } return fmt.Sprintf("%s://%s", httpPrefix, addr) } const ( // pdEvictLeaderName is evict leader scheduler name. pdEvictLeaderName = "evict-leader-scheduler" ) // nolint (some is unused now) var ( pdPingURI = "pd/ping" pdVersionURI = "pd/api/v1/version" pdConfigURI = "pd/api/v1/config" pdClusterIDURI = "pd/api/v1/cluster" pdConfigReplicate = "pd/api/v1/config/replicate" pdReplicationModeURI = "pd/api/v1/config/replication-mode" pdRulesURI = "pd/api/v1/config/rules" pdConfigSchedule = "pd/api/v1/config/schedule" pdLeaderURI = "pd/api/v1/leader" pdLeaderTransferURI = "pd/api/v1/leader/transfer" pdMembersURI = "pd/api/v1/members" pdMemberPriorityURI = "pd/api/v1/members/name/%s" pdSchedulersURI = "pd/api/v1/schedulers" pdStoreURI = "pd/api/v1/store" pdStoresURI = "pd/api/v1/stores" pdStoresLimitURI = "pd/api/v1/stores/limit" pdRemoveTombstone = "pd/api/v1/stores/remove-tombstone" pdRegionsCheckURI = "pd/api/v1/regions/check" pdServicePrimaryURI = "pd/api/v2/ms/primary" pdReadyURI = "pd/api/v2/ready" tsoHealthPrefix = "tso/api/v1/health" ) func tryURLs(endpoints []string, f func(endpoint string) ([]byte, error)) ([]byte, error) { if len(endpoints) == 0 { return nil, errors.New("no endpoint available") } var err error var bytes []byte for _, endpoint := range endpoints { var u *url.URL u, err = url.Parse(endpoint) if err != nil { return bytes, perrs.AddStack(err) } endpoint = u.String() bytes, err = f(endpoint) if err != nil { continue } return bytes, nil } if len(endpoints) > 1 && err != nil { err = perrs.Errorf("no endpoint available, the last err was: %s", err) } return bytes, err } func (pc *PDClient) getEndpoints(uri string) (endpoints []string) { for _, addr := range pc.addrs { endpoint := fmt.Sprintf("%s/%s", pc.GetURL(addr), uri) endpoints = append(endpoints, endpoint) } return } // CheckHealth checks the health of PD node func (pc 
*PDClient) CheckHealth() error { endpoints := pc.getEndpoints(pdPingURI) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, nil }) if err != nil { return err } return nil } // CheckTSOHealth checks the health of TSO service(which is a microservice component of PD) func (pc *PDClient) CheckTSOHealth(retryOpt *utils.RetryOption) error { endpoints := pc.getEndpoints(tsoHealthPrefix) if err := utils.Retry(func() error { var err error for _, endpoint := range endpoints { _, err = pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return err } } if err == nil { return nil } // return error by default, to make the retry work pc.l().Debugf("Still waiting for the PD microservice's TSO health") return perrs.New("Still waiting for the PD microservice's TSO health") }, *retryOpt); err != nil { return fmt.Errorf("error check PD microservice's TSO health, %v", err) } return nil } // GetStores queries the stores info from PD server func (pc *PDClient) GetStores() (*StoresInfo, error) { // Return all stores query := "?state=0&state=1&state=2" endpoints := pc.getEndpoints(pdStoresURI + query) storesInfo := StoresInfo{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &storesInfo) }) if err != nil { return nil, err } // Desc sorting the store list, we assume the store with largest ID is the // latest one. 
// Not necessary when we implement the workaround pd-3303 in GetCurrentStore() // sort.Slice(storesInfo.Stores, func(i int, j int) bool { // return storesInfo.Stores[i].Store.Id > storesInfo.Stores[j].Store.Id // }) return &storesInfo, nil } // GetCurrentStore gets the current store info of a given host func (pc *PDClient) GetCurrentStore(addr string) (*StoreInfo, error) { stores, err := pc.GetStores() if err != nil { return nil, err } // Find the store with largest ID var latestStore *StoreInfo for _, store := range stores.Stores { if store.Store.Address == addr { // Workaround of pd-3303: // If the PD leader has been switched multiple times, the store IDs // may be not monitonically assigned. To workaround this, we iterate // over the whole store list to see if any of the store's state is // not marked as "tombstone", then use that as the result. // See: https://github.com/tikv/pd/issues/3303 // // It's logically not necessary to find the store with largest ID // number anymore in this process, but we're keeping the behavior // as the reasonable approach would still be using the state from // latest store, and this is only a workaround. if store.Store.State != metapb.StoreState_Tombstone { return store, nil } if latestStore == nil { latestStore = store continue } if store.Store.Id > latestStore.Store.Id { latestStore = store } } } if latestStore != nil { return latestStore, nil } return nil, &NoStoreErr{addr: addr} } // WaitLeader wait until there's a leader or timeout. 
func (pc *PDClient) WaitLeader(retryOpt *utils.RetryOption) error { if retryOpt == nil { retryOpt = &utils.RetryOption{ Delay: time.Second * 1, Timeout: time.Second * 30, } } if err := utils.Retry(func() error { _, err := pc.GetLeader() if err == nil { return nil } // return error by default, to make the retry work pc.l().Debugf("Still waiting for the PD leader to be elected") return perrs.New("still waiting for the PD leader to be elected") }, *retryOpt); err != nil { return fmt.Errorf("error getting PD leader, %v", err) } return nil } // GetLeader queries the leader node of PD cluster func (pc *PDClient) GetLeader() (*pdpb.Member, error) { endpoints := pc.getEndpoints(pdLeaderURI) leader := pdpb.Member{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &leader) }) if err != nil { return nil, err } return &leader, nil } // GetMembers queries for member list from the PD server func (pc *PDClient) GetMembers() (*pdpb.GetMembersResponse, error) { endpoints := pc.getEndpoints(pdMembersURI) members := pdpb.GetMembersResponse{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &members) }) if err != nil { return nil, err } return &members, nil } // GetConfig returns all PD configs func (pc *PDClient) GetConfig() (map[string]any, error) { endpoints := pc.getEndpoints(pdConfigURI) // We don't use the `github.com/tikv/pd/server/config` directly because // there is compatible issue: https://github.com/pingcap/tiup/issues/637 pdConfig := map[string]any{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &pdConfig) }) if err != nil { return nil, err } return 
flatten.Flatten(pdConfig, "", flatten.DotStyle) } // GetClusterID return cluster ID func (pc *PDClient) GetClusterID() (uint64, error) { endpoints := pc.getEndpoints(pdClusterIDURI) var clusterID map[string]any _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } d := json.NewDecoder(bytes.NewBuffer(body)) d.UseNumber() clusterID = make(map[string]any) return nil, d.Decode(&clusterID) }) if err != nil { return 0, err } idStr := clusterID["id"].(json.Number).String() return strconv.ParseUint(idStr, 10, 64) } // GetDashboardAddress get the PD node address which runs dashboard func (pc *PDClient) GetDashboardAddress() (string, error) { cfg, err := pc.GetConfig() if err != nil { return "", perrs.AddStack(err) } addr, ok := cfg["pd-server.dashboard-address"].(string) if !ok { return "", perrs.New("cannot found dashboard address") } return addr, nil } // EvictPDLeader evicts the PD leader func (pc *PDClient) EvictPDLeader(retryOpt *utils.RetryOption) error { // get current members members, err := pc.GetMembers() if err != nil { return err } if len(members.Members) == 1 { pc.l().Warnf("Only 1 member in the PD cluster, skip leader evicting") return nil } // try to evict the leader cmd := fmt.Sprintf("%s/resign", pdLeaderURI) endpoints := pc.getEndpoints(cmd) _, err = tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Post(pc.ctx, endpoint, nil) if err != nil { return body, err } return body, nil }) if err != nil { return err } // wait for the transfer to complete if retryOpt == nil { retryOpt = &utils.RetryOption{ Delay: time.Second * 5, Timeout: time.Second * 300, } } if err := utils.Retry(func() error { currLeader, err := pc.GetLeader() if err != nil { return err } // check if current leader is the leader to evict if currLeader.Name != members.Leader.Name { return nil } // return error by default, to make the retry work 
pc.l().Debugf("Still waiting for the PD leader to transfer") return perrs.New("still waiting for the PD leader to transfer") }, *retryOpt); err != nil { return fmt.Errorf("error evicting PD leader, %v", err) } return nil } // pdSchedulerRequest is the request body when evicting store leader type pdSchedulerRequest struct { Name string `json:"name"` StoreID uint64 `json:"store_id"` } // EvictStoreLeader evicts the store leaders // The host parameter should be in format of IP:Port, that matches store's address func (pc *PDClient) EvictStoreLeader(host string, retryOpt *utils.RetryOption, countLeader func(string) (int, error)) error { // get info of current stores latestStore, err := pc.GetCurrentStore(host) if err != nil { if errors.Is(err, ErrNoStore) { return nil } return err } // XXX: the status address in store will be something like 0.0.0.0:20180 var leaderCount int if leaderCount, err = countLeader(latestStore.Store.Address); err != nil { return err } if leaderCount == 0 { // no store leader on the host, just skip return nil } pc.l().Infof("\tEvicting %d leaders from store %s...", leaderCount, latestStore.Store.Address) // set scheduler for stores scheduler, err := json.Marshal(pdSchedulerRequest{ Name: pdEvictLeaderName, StoreID: latestStore.Store.Id, }) if err != nil { return nil } endpoints := pc.getEndpoints(pdSchedulersURI) _, err = tryURLs(endpoints, func(endpoint string) ([]byte, error) { return pc.httpClient.Post(pc.ctx, endpoint, bytes.NewBuffer(scheduler)) }) if err != nil { return err } // wait for the transfer to complete if retryOpt == nil { retryOpt = &utils.RetryOption{ Delay: time.Second * 5, Timeout: time.Second * 600, } } if err := utils.Retry(func() error { currStore, err := pc.GetCurrentStore(host) if err != nil { if errors.Is(err, ErrNoStore) { return nil } return err } // check if all leaders are evicted if leaderCount, err = countLeader(currStore.Store.Address); err != nil { return err } if leaderCount == 0 { return nil } pc.l().Infof( 
"\t Still waiting for %d store leaders to transfer...", leaderCount, ) // return error by default, to make the retry work return perrs.New("still waiting for the store leaders to transfer") }, *retryOpt); err != nil { return fmt.Errorf("error evicting store leader from %s, %v", host, err) } return nil } // RecoverStoreLeader waits for some leaders to transfer back. // // Currently, recoverStoreLeader will be considered as succeed in any of the following case // // 1. 2/3 of leaders are already transferred back. // // 2. Original leader count is less than 200. // Though the accurate threshold is 57, it can be set to a larger value, for example 200. // Moreover, clusters which have small number of leaders are supposed to has low pressure, // and this recovering strategy may be unnecessary for them. Clusters in production env // usually has thousands of leaders. // // Since PD considers it as balance when the leader count delta is less than 10, so // these two conditions should be taken into consideration // // - When the original leader count is less than 20, there is possibility that // no leader will transfer back. // For example: The target store's leader count is 19. Other stores' leader count are 9. // There are 20 stores in total. In this case, there may be no leader to transfer back. // // - When the leader count is less than 57, there is possibility that only less than 2/3 // leaders are transferred back. `(N-10-9 >= 2/3*N) -> (N>=57)`. // For example: The target store's leader count is 56. Other stores' leader count are 46. // There are 57 stores in total. In this case, there may be only 37 leaders to transfer back, // and 37/56 < 2/3. Accordingly, if the target store's leader count is 57, then there may be // 38 leaders to transfer back, and 38/57 == 2/3. // // 3. The leader count has been unchanged for 5 times. 
func (pc *PDClient) RecoverStoreLeader(host string, originalCount int, retryOpt *utils.RetryOption, countLeader func(string) (int, error)) error { // When the leader count is less than certain number, just ignore recovering. if originalCount < 200 { return nil } targetCount := originalCount * 2 / 3 // The default leadership transfer timeout for one region is 10s, // so set the default value to about 10s (5*2s=10s). // NOTE: PD may not transfer leader to a newly started store in the future, // (check https://github.com/tikv/pd/pull/4762 for details), // so this strategy should also be enhanced later. maxUnchangedTimes := 5 // Get info of current stores. latestStore, err := pc.GetCurrentStore(host) if err != nil { if errors.Is(err, ErrNoStore) { return nil } return err } pc.l().Infof("\tRecovering about %d leaders to store %s, original count is %d...", targetCount, latestStore.Store.Address, originalCount) // Wait for the transfer to complete. if retryOpt == nil { retryOpt = &utils.RetryOption{ // The default timeout of evicting leader is 600s, so set the recovering timeout to // 2/3 of it should be reasonable. Besides, One local test shows it takes about // 30s to recover 3.6k leaders. Timeout: time.Second * 400, Delay: time.Second * 2, } } lastLeaderCount := math.MaxInt curUnchangedTimes := 0 if err := utils.Retry(func() error { currStore, err := pc.GetCurrentStore(host) if err != nil { if errors.Is(err, ErrNoStore) { return nil } return err } curLeaderCount, err := countLeader(currStore.Store.Address) if err != nil { return err } // Target number of leaders have been transferred back. if curLeaderCount >= targetCount { return nil } // Check if the leader count has been unchanged for certain times. 
if lastLeaderCount == curLeaderCount { curUnchangedTimes += 1 if curUnchangedTimes >= maxUnchangedTimes { pc.l().Warnf("\tSkip recovering leaders to %s, because leader count has been unchanged for %d times", host, maxUnchangedTimes) return nil } } else { lastLeaderCount = curLeaderCount curUnchangedTimes = 0 } pc.l().Infof( "\t Still waiting for at least %d leaders to transfer back...", targetCount-curLeaderCount, ) // Return error by default, to make the retry work. return perrs.New("still waiting for the store leaders to transfer back") }, *retryOpt); err != nil { return fmt.Errorf("error recovering store leader to %s, %v", host, err) } return nil } // RemoveStoreEvict removes a store leader evict scheduler, which allows following // leaders to be transffered to it again. func (pc *PDClient) RemoveStoreEvict(host string) error { // get info of current stores latestStore, err := pc.GetCurrentStore(host) if err != nil { return err } // remove scheduler for the store cmd := fmt.Sprintf( "%s/%s", pdSchedulersURI, fmt.Sprintf("%s-%d", pdEvictLeaderName, latestStore.Store.Id), ) endpoints := pc.getEndpoints(cmd) logger := pc.l() _, err = tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := pc.httpClient.Delete(pc.ctx, endpoint, nil) if err != nil { if statusCode == http.StatusNotFound || bytes.Contains(body, []byte("scheduler not found")) { logger.Debugf("Store leader evicting scheduler does not exist, ignore.") return body, nil } return body, err } logger.Debugf("Delete leader evicting scheduler of store %d success", latestStore.Store.Id) return body, nil }) if err != nil { return err } logger.Debugf("Removed store leader evicting scheduler from %s.", latestStore.Store.Address) return nil } // DelPD deletes a PD node from the cluster, name is the Name of the PD member func (pc *PDClient) DelPD(name string, retryOpt *utils.RetryOption) error { // get current members members, err := pc.GetMembers() if err != nil { return err } if 
len(members.Members) == 1 { return perrs.New("at least 1 PD node must be online, can not delete") } // try to delete the node cmd := fmt.Sprintf("%s/name/%s", pdMembersURI, name) endpoints := pc.getEndpoints(cmd) logger := pc.l() _, err = tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := pc.httpClient.Delete(pc.ctx, endpoint, nil) if err != nil { if statusCode == http.StatusNotFound || bytes.Contains(body, []byte("not found, pd")) { logger.Debugf("PD node does not exist, ignore: %s", body) return body, nil } return body, err } logger.Debugf("Delete PD %s from the cluster success", name) return body, nil }) if err != nil { return err } // wait for the deletion to complete if retryOpt == nil { retryOpt = &utils.RetryOption{ Delay: time.Second * 2, Timeout: time.Second * 60, } } if err := utils.Retry(func() error { currMembers, err := pc.GetMembers() if err != nil { return err } // check if the deleted member still present for _, member := range currMembers.Members { if member.Name == name { return perrs.New("still waiting for the PD node to be deleted") } } return nil }, *retryOpt); err != nil { return fmt.Errorf("error deleting PD node, %v", err) } return nil } func (pc *PDClient) isSameState(host string, state metapb.StoreState) (bool, error) { // get info of current stores storeInfo, err := pc.GetCurrentStore(host) if err != nil { return false, err } if storeInfo.Store.State == state { return true, nil } return false, nil } // IsTombStone check if the node is Tombstone. // The host parameter should be in format of IP:Port, that matches store's address func (pc *PDClient) IsTombStone(host string) (bool, error) { return pc.isSameState(host, metapb.StoreState_Tombstone) } // IsUp check if the node is Up state. 
// The host parameter should be in format of IP:Port, that matches store's address func (pc *PDClient) IsUp(host string) (bool, error) { return pc.isSameState(host, metapb.StoreState_Up) } // DelStore deletes stores from a (TiKV) host // The host parameter should be in format of IP:Port, that matches store's address func (pc *PDClient) DelStore(host string, retryOpt *utils.RetryOption) error { // get info of current stores storeInfo, err := pc.GetCurrentStore(host) if err != nil { if errors.Is(err, ErrNoStore) { return nil } return err } // get store ID of host storeID := storeInfo.Store.Id cmd := fmt.Sprintf("%s/%d", pdStoreURI, storeID) endpoints := pc.getEndpoints(cmd) logger := pc.l() _, err = tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := pc.httpClient.Delete(pc.ctx, endpoint, nil) if err != nil { if statusCode == http.StatusNotFound || bytes.Contains(body, []byte("not found")) { logger.Debugf("store %d %s does not exist, ignore: %s", storeID, host, body) return body, nil } return body, err } logger.Debugf("Delete store %d %s from the cluster success", storeID, host) return body, nil }) if err != nil { return err } // wait for the deletion to complete if retryOpt == nil { retryOpt = &utils.RetryOption{ Delay: time.Second * 2, Timeout: time.Second * 60, } } if err := utils.Retry(func() error { currStore, err := pc.GetCurrentStore(host) if err != nil { // the store does not exist anymore, just ignore and skip if errors.Is(err, ErrNoStore) { return nil } return err } if currStore.Store.Id == storeID { // deleting a store may take long time to transfer data, so we // return success once it get to "Offline" status and not waiting // for the whole process to complete. // When finished, the store's state will be "Tombstone". 
if currStore.Store.State != metapb.StoreState_Up { return nil } return perrs.New("still waiting for the store to be deleted") } return nil }, *retryOpt); err != nil { return fmt.Errorf("error deleting store, %v", err) } return nil } // RemoveTombstone remove tombstone instance func (pc *PDClient) RemoveTombstone() error { endpoints := pc.getEndpoints(pdRemoveTombstone) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { _, _, err := pc.httpClient.Delete(pc.ctx, endpoint, nil) return nil, err }) return err } func (pc *PDClient) updateConfig(url string, body io.Reader) error { endpoints := pc.getEndpoints(url) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { return pc.httpClient.Post(pc.ctx, endpoint, body) }) return err } // UpdateReplicateConfig updates the PD replication config func (pc *PDClient) UpdateReplicateConfig(body io.Reader) error { return pc.updateConfig(pdConfigReplicate, body) } // GetReplicateConfig gets the PD replication config func (pc *PDClient) GetReplicateConfig() ([]byte, error) { endpoints := pc.getEndpoints(pdConfigReplicate) return tryURLs(endpoints, func(endpoint string) ([]byte, error) { return pc.httpClient.Get(pc.ctx, endpoint) }) } // GetLocationLabels gets the replication.location-labels config from pd server func (pc *PDClient) GetLocationLabels() ([]string, bool, error) { config, err := pc.GetReplicateConfig() if err != nil { return nil, false, err } rc := PDReplicationConfig{} if err := json.Unmarshal(config, &rc); err != nil { return nil, false, perrs.Annotatef(err, "unmarshal replication config: %s", string(config)) } return rc.LocationLabels, rc.EnablePlacementRules, nil } // GetTiKVLabels implements TiKVLabelProvider func (pc *PDClient) GetTiKVLabels() (map[string]map[string]string, []map[string]LabelInfo, error) { r, err := pc.GetStores() if err != nil { return nil, nil, err } var storeInfo []map[string]LabelInfo locationLabels := map[string]map[string]string{} for _, s := range r.Stores { 
if s.Store.State == metapb.StoreState_Up { lbs := s.Store.GetLabels() host, port := utils.ParseHostPort(s.Store.GetAddress()) labelsMap := map[string]string{} var labelsArr []string for _, lb := range lbs { // Skip tiflash if lb.GetKey() != "tiflash" { labelsArr = append(labelsArr, fmt.Sprintf("%s: %s", lb.GetKey(), lb.GetValue())) labelsMap[lb.GetKey()] = lb.GetValue() } } locationLabels[s.Store.GetAddress()] = labelsMap label := fmt.Sprintf("%s%s%s", "{", strings.Join(labelsArr, ","), "}") storeInfo = append(storeInfo, map[string]LabelInfo{ host: { Machine: host, Port: port, Store: s.Store.GetId(), Status: s.Store.State.String(), Leaders: s.Status.LeaderCount, Regions: s.Status.RegionCount, Capacity: s.Status.Capacity.MarshalString(), Available: s.Status.Available.MarshalString(), Labels: label, }, }) } } return locationLabels, storeInfo, nil } // UpdateScheduleConfig updates the PD schedule config func (pc *PDClient) UpdateScheduleConfig(body io.Reader) error { return pc.updateConfig(pdConfigSchedule, body) } // CheckRegion queries for the region with specific status func (pc *PDClient) CheckRegion(state string) (*RegionsInfo, error) { uri := pdRegionsCheckURI + "/" + state endpoints := pc.getEndpoints(uri) regionsInfo := RegionsInfo{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, ®ionsInfo) }) return ®ionsInfo, err } // SetReplicationConfig sets a config key value of PD replication, it has the // same effect as `pd-ctl config set key value` func (pc *PDClient) SetReplicationConfig(key string, value int) error { // Only support for pd version >= v4.0.0 if pc.version == "" || semver.Compare(pc.version, "v4.0.0") < 0 { return nil } data := map[string]any{"set": map[string]any{key: value}} body, err := json.Marshal(data) if err != nil { return err } pc.l().Debugf("setting replication config: %s=%d", key, value) return 
pc.updateConfig(pdReplicationModeURI, bytes.NewBuffer(body)) } // SetAllStoreLimits sets store for all stores and types, it has the same effect // as `pd-ctl store limit all value` func (pc *PDClient) SetAllStoreLimits(value int) error { // Only support for pd version >= v4.0.0 if pc.version == "" || semver.Compare(pc.version, "v4.0.0") < 0 { return nil } data := map[string]any{"rate": value} body, err := json.Marshal(data) if err != nil { return err } pc.l().Debugf("setting store limit: %d", value) return pc.updateConfig(pdStoresLimitURI, bytes.NewBuffer(body)) } // GetServicePrimary queries for the primary of a service func (pc *PDClient) GetServicePrimary(service string) (string, error) { endpoints := pc.getEndpoints(fmt.Sprintf("%s/%s", pdServicePrimaryURI, service)) var primary string _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &primary) }) return primary, err } // SetLeaderPriority sets priority config value of PD member. 
func (pc *PDClient) SetLeaderPriority(name string, value int32) error { data := map[string]any{"leader-priority": value} body, err := json.Marshal(data) if err != nil { return err } pc.l().Debugf("setting leader_priority for %s: %d", name, value) return pc.updateConfig(fmt.Sprintf(pdMemberPriorityURI, name), bytes.NewBuffer(body)) } const ( tsoStatusURI = "status" schedulingStatusURI = "status" ) // TSOClient is an HTTP client of the TSO server type TSOClient struct { version string addrs []string tlsEnabled bool httpClient *utils.HTTPClient ctx context.Context } // NewTSOClient returns a new TSOClient, the context must have // a *logprinter.Logger as value of "logger" func NewTSOClient( ctx context.Context, addrs []string, timeout time.Duration, tlsConfig *tls.Config, ) *TSOClient { enableTLS := false if tlsConfig != nil { enableTLS = true } if _, ok := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger); !ok { panic("the context must have logger inside") } cli := &TSOClient{ addrs: addrs, tlsEnabled: enableTLS, httpClient: utils.NewHTTPClient(timeout, tlsConfig), ctx: ctx, } cli.tryIdentifyVersion() return cli } // func (tc *TSOClient) l() *logprinter.Logger { // return tc.ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) // } func (tc *TSOClient) tryIdentifyVersion() { endpoints := tc.getEndpoints(tsoStatusURI) response := map[string]string{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := tc.httpClient.Get(tc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &response) }) if err == nil { tc.version = response["version"] } } // GetURL builds the client URL of PDClient func (tc *TSOClient) GetURL(addr string) string { httpPrefix := "http" if tc.tlsEnabled { httpPrefix = "https" } return fmt.Sprintf("%s://%s", httpPrefix, addr) } func (tc *TSOClient) getEndpoints(uri string) (endpoints []string) { for _, addr := range tc.addrs { endpoint := fmt.Sprintf("%s/%s", tc.GetURL(addr), 
uri) endpoints = append(endpoints, endpoint) } return } // CheckHealth checks the health of TSO node. func (tc *TSOClient) CheckHealth() error { endpoints := tc.getEndpoints(tsoStatusURI) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := tc.httpClient.Get(tc.ctx, endpoint) if err != nil { return body, err } return body, nil }) if err != nil { return err } return nil } // CheckReady use the new api to test if PD has loaded all regions. func (pc *PDClient) CheckReady() error { endpoints := pc.getEndpoints(pdReadyURI) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := pc.httpClient.Get(pc.ctx, endpoint) if err != nil { return body, err } return body, nil }) return err } // SchedulingClient is an HTTP client of the scheduling server type SchedulingClient struct { version string addrs []string tlsEnabled bool httpClient *utils.HTTPClient ctx context.Context } // NewSchedulingClient returns a new SchedulingClient, the context must have // a *logprinter.Logger as value of "logger" func NewSchedulingClient( ctx context.Context, addrs []string, timeout time.Duration, tlsConfig *tls.Config, ) *SchedulingClient { enableTLS := false if tlsConfig != nil { enableTLS = true } if _, ok := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger); !ok { panic("the context must have logger inside") } cli := &SchedulingClient{ addrs: addrs, tlsEnabled: enableTLS, httpClient: utils.NewHTTPClient(timeout, tlsConfig), ctx: ctx, } cli.tryIdentifyVersion() return cli } // func (tc *SchedulingClient) l() *logprinter.Logger { // return tc.ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) // } func (tc *SchedulingClient) tryIdentifyVersion() { endpoints := tc.getEndpoints(schedulingStatusURI) response := map[string]string{} _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := tc.httpClient.Get(tc.ctx, endpoint) if err != nil { return body, err } return body, json.Unmarshal(body, &response) 
}) if err == nil { tc.version = response["version"] } } // GetURL builds the client URL of PDClient func (tc *SchedulingClient) GetURL(addr string) string { httpPrefix := "http" if tc.tlsEnabled { httpPrefix = "https" } return fmt.Sprintf("%s://%s", httpPrefix, addr) } func (tc *SchedulingClient) getEndpoints(uri string) (endpoints []string) { for _, addr := range tc.addrs { endpoint := fmt.Sprintf("%s/%s", tc.GetURL(addr), uri) endpoints = append(endpoints, endpoint) } return } // CheckHealth checks the health of scheduling node. func (tc *SchedulingClient) CheckHealth() error { endpoints := tc.getEndpoints(schedulingStatusURI) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, err := tc.httpClient.Get(tc.ctx, endpoint) if err != nil { return body, err } return body, nil }) if err != nil { return err } return nil } tiup-1.16.3/pkg/cluster/api/tidbapi.go000066400000000000000000000040001505422223000175260ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package api import ( "context" "crypto/tls" "fmt" "time" "github.com/pingcap/tiup/pkg/utils" ) // TiDBClient is client for access TiKVCDC Open API type TiDBClient struct { urls []string client *utils.HTTPClient ctx context.Context } // NewTiDBClient return a `TiDBClient` func NewTiDBClient(ctx context.Context, addresses []string, timeout time.Duration, tlsConfig *tls.Config) *TiDBClient { httpPrefix := "http" if tlsConfig != nil { httpPrefix = "https" } urls := make([]string, 0, len(addresses)) for _, addr := range addresses { urls = append(urls, fmt.Sprintf("%s://%s", httpPrefix, addr)) } return &TiDBClient{ urls: urls, client: utils.NewHTTPClient(timeout, tlsConfig), ctx: ctx, } } func (c *TiDBClient) getEndpoints(api string) (endpoints []string) { for _, url := range c.urls { endpoints = append(endpoints, fmt.Sprintf("%s%s", url, api)) } return endpoints } // StartUpgrade sends the start upgrade message to the TiDB server func (c *TiDBClient) StartUpgrade() error { api := "/upgrade/start" endpoints := c.getEndpoints(api) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { return c.client.Post(c.ctx, endpoint, nil) }) return err } // FinishUpgrade sends the finish upgrade message to the TiDB server func (c *TiDBClient) FinishUpgrade() error { api := "/upgrade/finish" endpoints := c.getEndpoints(api) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { return c.client.Post(c.ctx, endpoint, nil) }) return err } tiup-1.16.3/pkg/cluster/api/tikv_cdc.go000066400000000000000000000142611505422223000177120ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package api import ( "context" "crypto/tls" "encoding/json" "fmt" "net/http" "time" "github.com/pingcap/errors" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/utils" ) // TiKVCDCOpenAPIClient is client for access TiKVCDC Open API type TiKVCDCOpenAPIClient struct { urls []string client *utils.HTTPClient ctx context.Context } // NewTiKVCDCOpenAPIClient return a `TiKVCDCOpenAPIClient` func NewTiKVCDCOpenAPIClient(ctx context.Context, addresses []string, timeout time.Duration, tlsConfig *tls.Config) *TiKVCDCOpenAPIClient { httpPrefix := "http" if tlsConfig != nil { httpPrefix = "https" } urls := make([]string, 0, len(addresses)) for _, addr := range addresses { urls = append(urls, fmt.Sprintf("%s://%s", httpPrefix, addr)) } return &TiKVCDCOpenAPIClient{ urls: urls, client: utils.NewHTTPClient(timeout, tlsConfig), ctx: ctx, } } func (c *TiKVCDCOpenAPIClient) getEndpoints(api string) (endpoints []string) { for _, url := range c.urls { endpoints = append(endpoints, fmt.Sprintf("%s/%s", url, api)) } return endpoints } // ResignOwner resign the TiKV-CDC owner, and wait for a new owner be found func (c *TiKVCDCOpenAPIClient) ResignOwner() error { api := "api/v1/owner/resign" endpoints := c.getEndpoints(api) _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := c.client.PostWithStatusCode(c.ctx, endpoint, nil) if err != nil { if statusCode == http.StatusNotFound { c.l().Debugf("resign owner does not found, ignore it, err: %+v", err) return body, nil } return body, err } return body, nil }) if err != nil { return err } owner, err := c.GetOwner() if err != nil { 
return err } c.l().Debugf("tikv-cdc resign owner successfully, and new owner found, owner: %+v", owner) return nil } // GetOwner return the TiKV-CDC owner capture information func (c *TiKVCDCOpenAPIClient) GetOwner() (*TiKVCDCCapture, error) { captures, err := c.GetAllCaptures() if err != nil { return nil, err } for _, capture := range captures { if capture.IsOwner { return capture, nil } } return nil, fmt.Errorf("cannot found the tikv-cdc owner, query urls: %+v", c.urls) } // GetCaptureByAddr return the capture information by the address func (c *TiKVCDCOpenAPIClient) GetCaptureByAddr(addr string) (*TiKVCDCCapture, error) { captures, err := c.GetAllCaptures() if err != nil { return nil, err } for _, capture := range captures { if capture.AdvertiseAddr == addr { return capture, nil } } return nil, fmt.Errorf("capture not found, addr: %s", addr) } // GetAllCaptures return all captures instantaneously func (c *TiKVCDCOpenAPIClient) GetAllCaptures() (result []*TiKVCDCCapture, err error) { err = utils.Retry(func() error { result, err = c.queryAllCaptures() if err != nil { return err } return nil }, utils.RetryOption{ Timeout: 10 * time.Second, }) return result, err } func (c *TiKVCDCOpenAPIClient) queryAllCaptures() ([]*TiKVCDCCapture, error) { api := "api/v1/captures" endpoints := c.getEndpoints(api) var response []*TiKVCDCCapture _, err := tryURLs(endpoints, func(endpoint string) ([]byte, error) { body, statusCode, err := c.client.GetWithStatusCode(c.ctx, endpoint) if err != nil { if statusCode == http.StatusNotFound { // Ignore error, and return nil to trigger hard restart c.l().Debugf("get all captures failed, ignore it, err: %+v", err) return body, nil } return body, err } return body, json.Unmarshal(body, &response) }) return response, err } // IsCaptureAlive return error if the capture is not alive func (c *TiKVCDCOpenAPIClient) IsCaptureAlive() error { status, err := c.GetStatus() if err != nil { return err } if status.Liveness != TiKVCDCCaptureAlive { return 
fmt.Errorf("capture is not alive, request url: %+v", c.urls[0]) } return nil } // GetStatus return the status of the TiKVCDC server. func (c *TiKVCDCOpenAPIClient) GetStatus() (result TiKVCDCServerStatus, err error) { api := "api/v1/status" // client should only have address to the target TiKV-CDC server, not all. endpoints := c.getEndpoints(api) err = utils.Retry(func() error { data, statusCode, err := c.client.GetWithStatusCode(c.ctx, endpoints[0]) if err != nil { if statusCode == http.StatusNotFound { c.l().Debugf("capture server status failed, ignore it, err: %+v", err) return nil } err = json.Unmarshal(data, &result) if err != nil { return err } if result.Liveness == TiKVCDCCaptureAlive { return nil } return errors.New("capture status is not alive, retry it") } return nil }, utils.RetryOption{ Timeout: 10 * time.Second, }) return result, err } func (c *TiKVCDCOpenAPIClient) l() *logprinter.Logger { return c.ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) } // TiKVCDCLiveness is the liveness status of a capture. type TiKVCDCLiveness int32 const ( // TiKVCDCCaptureAlive means the capture is alive, and ready to serve. TiKVCDCCaptureAlive TiKVCDCLiveness = 0 // TiKVCDCCaptureStopping means the capture is in the process of graceful shutdown. TiKVCDCCaptureStopping TiKVCDCLiveness = 1 ) // TiKVCDCServerStatus holds some common information of a TiCDC server type TiKVCDCServerStatus struct { Version string `json:"version"` GitHash string `json:"git_hash"` ID string `json:"id"` Pid int `json:"pid"` IsOwner bool `json:"is_owner"` Liveness TiKVCDCLiveness `json:"liveness"` } // TiKVCDCCapture holds common information of a capture in cdc type TiKVCDCCapture struct { ID string `json:"id"` IsOwner bool `json:"is_owner"` AdvertiseAddr string `json:"address"` } tiup-1.16.3/pkg/cluster/api/types.go000066400000000000000000000140441505422223000172670ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. // Some structs are manually copied from other packages like PD, to avoid // direct depends on them, which will make our dependency tree complicated // and hard to tidy and upgrade. package api import ( "time" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/tiup/pkg/cluster/api/typeutil" ) // PDReplicationConfig is the replication type configuration ReplicationConfig from PD. type PDReplicationConfig struct { // MaxReplicas is the number of replicas for each region. MaxReplicas uint64 `toml:"max-replicas" json:"max-replicas"` // The label keys specified the location of a store. // The placement priorities is implied by the order of label keys. // For example, ["zone", "rack"] means that we should place replicas to // different zones first, then to different racks if we don't have enough zones. LocationLabels typeutil.StringSlice `toml:"location-labels" json:"location-labels"` // StrictlyMatchLabel strictly checks if the label of TiKV is matched with LocationLabels. StrictlyMatchLabel bool `toml:"strictly-match-label" json:"strictly-match-label,string"` // When PlacementRules feature is enabled. MaxReplicas, LocationLabels and IsolationLabels are not used any more. EnablePlacementRules bool `toml:"enable-placement-rules" json:"enable-placement-rules,string"` // IsolationLevel is used to isolate replicas explicitly and forcibly if it's not empty. // Its value must be empty or one of LocationLabels. 
// Example: // location-labels = ["zone", "rack", "host"] // isolation-level = "zone" // With configuration like above, PD ensure that all replicas be placed in different zones. // Even if a zone is down, PD will not try to make up replicas in other zone // because other zones already have replicas on it. IsolationLevel string `toml:"isolation-level" json:"isolation-level"` } // MetaStore contains meta information about a store. type MetaStore struct { *metapb.Store StateName string `json:"state_name"` } // StoreStatus contains status about a store. type StoreStatus struct { Capacity typeutil.ByteSize `json:"capacity"` Available typeutil.ByteSize `json:"available"` UsedSize typeutil.ByteSize `json:"used_size"` LeaderCount int `json:"leader_count"` LeaderWeight float64 `json:"leader_weight"` LeaderScore float64 `json:"leader_score"` LeaderSize int64 `json:"leader_size"` RegionCount int `json:"region_count"` RegionWeight float64 `json:"region_weight"` RegionScore float64 `json:"region_score"` RegionSize int64 `json:"region_size"` SlowScore uint64 `json:"slow_score,omitempty"` // added omitempty SendingSnapCount uint32 `json:"sending_snap_count,omitempty"` ReceivingSnapCount uint32 `json:"receiving_snap_count,omitempty"` ApplyingSnapCount uint32 `json:"applying_snap_count,omitempty"` IsBusy bool `json:"is_busy,omitempty"` StartTS *time.Time `json:"start_ts,omitempty"` LastHeartbeatTS *time.Time `json:"last_heartbeat_ts,omitempty"` Uptime *typeutil.Duration `json:"uptime,omitempty"` } // StoreInfo contains information about a store. type StoreInfo struct { Store *MetaStore `json:"store"` Status *StoreStatus `json:"status"` } // StoresInfo records stores' info. type StoresInfo struct { Count int `json:"count"` Stores []*StoreInfo `json:"stores"` } // ReplicationStatus represents the replication mode status of the region. type ReplicationStatus struct { State string `json:"state"` StateID uint64 `json:"state_id"` } // MetaPeer is api compatible with *metapb.Peer. 
type MetaPeer struct { *metapb.Peer // RoleName is `Role.String()`. // Since Role is serialized as int by json by default, // introducing it will make the output of pd-ctl easier to identify Role. RoleName string `json:"role_name"` // IsLearner is `Role == "Learner"`. // Since IsLearner was changed to Role in kvproto in 5.0, this field was introduced to ensure api compatibility. IsLearner bool `json:"is_learner,omitempty"` } // PDPeerStats is api compatible with *pdpb.PeerStats. // NOTE: This type is exported by HTTP API. Please pay more attention when modifying it. type PDPeerStats struct { *pdpb.PeerStats Peer MetaPeer `json:"peer"` } // RegionInfo records detail region info for api usage. type RegionInfo struct { ID uint64 `json:"id"` StartKey string `json:"start_key"` EndKey string `json:"end_key"` RegionEpoch *metapb.RegionEpoch `json:"epoch,omitempty"` Peers []MetaPeer `json:"peers,omitempty"` Leader MetaPeer `json:"leader"` DownPeers []PDPeerStats `json:"down_peers,omitempty"` PendingPeers []MetaPeer `json:"pending_peers,omitempty"` WrittenBytes uint64 `json:"written_bytes"` ReadBytes uint64 `json:"read_bytes"` WrittenKeys uint64 `json:"written_keys"` ReadKeys uint64 `json:"read_keys"` ApproximateSize int64 `json:"approximate_size"` ApproximateKeys int64 `json:"approximate_keys"` Buckets []string `json:"buckets,omitempty"` ReplicationStatus *ReplicationStatus `json:"replication_status,omitempty"` } // RegionsInfo contains some regions with the detailed region info. type RegionsInfo struct { Count int `json:"count"` Regions []*RegionInfo `json:"regions"` } tiup-1.16.3/pkg/cluster/api/typeutil/000077500000000000000000000000001505422223000174505ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/api/typeutil/README.md000066400000000000000000000001311505422223000207220ustar00rootroot00000000000000This package is imported from [PD](https://github.com/tikv/pd/blob/master/pkg/typeutil). 
tiup-1.16.3/pkg/cluster/api/typeutil/duration.go000066400000000000000000000033541505422223000216310ustar00rootroot00000000000000// Copyright 2016 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package typeutil import ( "fmt" "strconv" "time" "github.com/pingcap/errors" ) // Duration is a wrapper of time.Duration for TOML and JSON. type Duration struct { time.Duration } // NewDuration creates a Duration from time.Duration. func NewDuration(duration time.Duration) Duration { return Duration{Duration: duration} } // MarshalJSON returns the duration as a JSON string. func (d *Duration) MarshalJSON() ([]byte, error) { return fmt.Appendf(nil, `"%s"`, d.String()), nil } // UnmarshalJSON parses a JSON string into the duration. func (d *Duration) UnmarshalJSON(text []byte) error { s, err := strconv.Unquote(string(text)) if err != nil { return errors.WithStack(err) } duration, err := time.ParseDuration(s) if err != nil { return errors.WithStack(err) } d.Duration = duration return nil } // UnmarshalText parses a TOML string into the duration. func (d *Duration) UnmarshalText(text []byte) error { var err error d.Duration, err = time.ParseDuration(string(text)) return errors.WithStack(err) } // MarshalText returns the duration as a JSON string. 
func (d Duration) MarshalText() ([]byte, error) { return []byte(d.String()), nil } tiup-1.16.3/pkg/cluster/api/typeutil/duration_test.go000066400000000000000000000024451505422223000226700ustar00rootroot00000000000000// Copyright 2016 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package typeutil import ( "encoding/json" "testing" "github.com/BurntSushi/toml" "github.com/stretchr/testify/require" ) type example struct { Interval Duration `json:"interval" toml:"interval"` } func TestDurationJSON(t *testing.T) { ex := &example{} text := []byte(`{"interval":"1h1m1s"}`) require.NoError(t, json.Unmarshal(text, ex)) require.Equal(t, float64(60*60+60+1), ex.Interval.Seconds()) b, err := json.Marshal(ex) require.NoError(t, err) require.Equal(t, string(text), string(b)) } func TestDurationTOML(t *testing.T) { ex := &example{} text := []byte(`interval = "1h1m1s"`) require.NoError(t, toml.Unmarshal(text, ex)) require.Equal(t, float64(60*60+60+1), ex.Interval.Seconds()) } tiup-1.16.3/pkg/cluster/api/typeutil/size.go000066400000000000000000000034621505422223000207560ustar00rootroot00000000000000// Copyright 2017 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package typeutil import ( "strconv" "github.com/docker/go-units" "github.com/pingcap/errors" ) // ByteSize is a retype uint64 for TOML and JSON. type ByteSize uint64 // ParseMBFromText parses MB from text. func ParseMBFromText(text string, value uint64) uint64 { b := ByteSize(0) err := b.UnmarshalText([]byte(text)) if err != nil { return value } return uint64(b / units.MiB) } // MarshalJSON returns the size as a JSON string. func (b ByteSize) MarshalJSON() ([]byte, error) { return []byte(`"` + units.BytesSize(float64(b)) + `"`), nil } // MarshalString returns the size as a string. func (b ByteSize) MarshalString() string { return units.BytesSize(float64(b)) } // UnmarshalJSON parses a JSON string into the byte size. func (b *ByteSize) UnmarshalJSON(text []byte) error { s, err := strconv.Unquote(string(text)) if err != nil { return errors.WithStack(err) } v, err := units.RAMInBytes(s) if err != nil { return errors.WithStack(err) } *b = ByteSize(v) return nil } // UnmarshalText parses a Toml string into the byte size. func (b *ByteSize) UnmarshalText(text []byte) error { v, err := units.RAMInBytes(string(text)) if err != nil { return errors.WithStack(err) } *b = ByteSize(v) return nil } tiup-1.16.3/pkg/cluster/api/typeutil/size_test.go000066400000000000000000000030311505422223000220050ustar00rootroot00000000000000// Copyright 2017 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package typeutil import ( "encoding/json" "testing" "github.com/docker/go-units" "github.com/stretchr/testify/require" ) func TestSizeJSON(t *testing.T) { t.Parallel() re := require.New(t) b := ByteSize(265421587) o, err := json.Marshal(b) re.NoError(err) var nb ByteSize err = json.Unmarshal(o, &nb) re.NoError(err) b = ByteSize(1756821276000) o, err = json.Marshal(b) re.NoError(err) re.Equal(`"1.598TiB"`, string(o)) } func TestParseMbFromText(t *testing.T) { t.Parallel() re := require.New(t) testCases := []struct { body []string size uint64 }{{ body: []string{"10Mib", "10MiB", "10M", "10MB"}, size: uint64(10), }, { body: []string{"10GiB", "10Gib", "10G", "10GB"}, size: uint64(10 * units.GiB / units.MiB), }, { body: []string{"10yiB", "10aib"}, size: uint64(1), }} for _, testCase := range testCases { for _, b := range testCase.body { re.Equal(int(testCase.size), int(ParseMBFromText(b, 1))) } } } tiup-1.16.3/pkg/cluster/api/typeutil/string_slice.go000066400000000000000000000023231505422223000224640ustar00rootroot00000000000000// Copyright 2017 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package typeutil import ( "strconv" "strings" "github.com/pingcap/errors" ) // StringSlice is more friendly to json encode/decode type StringSlice []string // MarshalJSON returns the size as a JSON string. func (s StringSlice) MarshalJSON() ([]byte, error) { return []byte(strconv.Quote(strings.Join(s, ","))), nil } // UnmarshalJSON parses a JSON string into the byte size. func (s *StringSlice) UnmarshalJSON(text []byte) error { data, err := strconv.Unquote(string(text)) if err != nil { return errors.WithStack(err) } if len(data) == 0 { *s = []string{} return nil } *s = strings.Split(data, ",") return nil } tiup-1.16.3/pkg/cluster/api/typeutil/string_slice_test.go000066400000000000000000000023301505422223000235210ustar00rootroot00000000000000// Copyright 2017 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package typeutil import ( "encoding/json" "testing" "github.com/stretchr/testify/require" ) func TestStringSliceJSON(t *testing.T) { b := StringSlice([]string{"zone", "rack"}) o, err := json.Marshal(b) require.NoError(t, err) require.Equal(t, "\"zone,rack\"", string(o)) var nb StringSlice err = json.Unmarshal(o, &nb) require.NoError(t, err) require.Equal(t, b, nb) } func TestStringSliceEmpty(t *testing.T) { ss := StringSlice([]string{}) b, err := json.Marshal(ss) require.NoError(t, err) require.Equal(t, "\"\"", string(b)) var ss2 StringSlice require.NoError(t, ss2.UnmarshalJSON(b)) require.Equal(t, ss, ss2) } tiup-1.16.3/pkg/cluster/audit/000077500000000000000000000000001505422223000161265ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/audit/audit.go000066400000000000000000000165141505422223000175720ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package audit import ( "bufio" "encoding/json" "fmt" "net/url" "os" "path/filepath" "sort" "strings" "time" "github.com/fatih/color" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/base52" "github.com/pingcap/tiup/pkg/crypto/rand" "github.com/pingcap/tiup/pkg/tui" tiuputils "github.com/pingcap/tiup/pkg/utils" ) const ( // EnvNameAuditID is the alternative ID appended to time based audit ID EnvNameAuditID = "TIUP_AUDIT_ID" ) // CommandArgs returns the original commands from the first line of a file func CommandArgs(fp string) ([]string, error) { file, err := os.Open(fp) if err != nil { return nil, errors.Trace(err) } defer file.Close() scanner := bufio.NewScanner(file) if !scanner.Scan() { return nil, errors.New("unknown audit log format") } args := strings.Split(scanner.Text(), " ") return decodeCommandArgs(args) } // encodeCommandArgs encode args with url.QueryEscape func encodeCommandArgs(args []string) []string { encoded := []string{} for _, arg := range args { encoded = append(encoded, url.QueryEscape(arg)) } return encoded } // decodeCommandArgs decode args with url.QueryUnescape func decodeCommandArgs(args []string) ([]string, error) { decoded := []string{} for _, arg := range args { a, err := url.QueryUnescape(arg) if err != nil { return nil, errors.Annotate(err, "failed on decode the command line of audit log") } decoded = append(decoded, a) } return decoded, nil } // ShowAuditList show the audit list. 
func ShowAuditList(dir string) error { // Header clusterTable := [][]string{{"ID", "Time", "Command"}} auditList, err := GetAuditList(dir) if err != nil { return err } for _, item := range auditList { clusterTable = append(clusterTable, []string{ item.ID, item.Time, item.Command, }) } tui.PrintTable(clusterTable, true) return nil } // Item represents a single audit item type Item struct { ID string `json:"id"` Time string `json:"time"` Command string `json:"command"` } // GetAuditList get the audit item list func GetAuditList(dir string) ([]Item, error) { fileInfos, err := os.ReadDir(dir) if err != nil { return nil, err } auditList := []Item{} for _, fi := range fileInfos { if fi.IsDir() { continue } t, err := decodeAuditID(fi.Name()) if err != nil { continue } args, err := CommandArgs(filepath.Join(dir, fi.Name())) if err != nil { continue } cmd := strings.Join(args, " ") auditList = append(auditList, Item{ ID: fi.Name(), Time: t.Format(time.RFC3339), Command: cmd, }) } sort.Slice(auditList, func(i, j int) bool { return auditList[i].Time < auditList[j].Time }) return auditList, nil } // OutputAuditLog outputs audit log. 
func OutputAuditLog(dir, fileSuffix string, data []byte) error { auditID := base52.Encode(time.Now().UnixNano() + rand.Int63n(1000)) if customID := os.Getenv(EnvNameAuditID); customID != "" { auditID = fmt.Sprintf("%s_%s", auditID, customID) } if fileSuffix != "" { auditID = fmt.Sprintf("%s_%s", auditID, fileSuffix) } fname := filepath.Join(dir, auditID) f, err := os.Create(fname) if err != nil { return errors.Annotate(err, "create audit log") } defer f.Close() args := encodeCommandArgs(os.Args) if _, err := f.Write([]byte(strings.Join(args, " ") + "\n")); err != nil { return errors.Annotate(err, "write audit log") } if _, err := f.Write(data); err != nil { return errors.Annotate(err, "write audit log") } return nil } // ShowAuditLog show the audit with the specified auditID func ShowAuditLog(dir string, auditID string) error { path := filepath.Join(dir, auditID) if tiuputils.IsNotExist(path) { return errors.Errorf("cannot find the audit log '%s'", auditID) } t, err := decodeAuditID(auditID) if err != nil { return errors.Annotatef(err, "unrecognized audit id '%s'", auditID) } content, err := os.ReadFile(path) if err != nil { return errors.Trace(err) } hint := fmt.Sprintf("- OPERATION TIME: %s -", t.Format("2006-01-02T15:04:05")) line := strings.Repeat("-", len(hint)) _, _ = os.Stdout.WriteString(color.MagentaString("%s\n%s\n%s\n", line, hint, line)) _, _ = os.Stdout.Write(content) return nil } // decodeAuditID decodes the auditID to unix timestamp func decodeAuditID(auditID string) (time.Time, error) { tsID := auditID if strings.Contains(auditID, "_") { tsID = strings.Split(auditID, "_")[0] } ts, err := base52.Decode(tsID) if err != nil { return time.Time{}, err } // compatible with old second based ts if ts>>32 > 0 { ts /= 1e9 } t := time.Unix(ts, 0) return t, nil } type deleteAuditLog struct { Files []string `json:"files"` Size int64 `json:"size"` Count int `json:"count"` DelBeforeTime time.Time `json:"delete_before_time"` // audit logs before `DelBeforeTime` 
will be deleted } // DeleteAuditLog cleanup audit log func DeleteAuditLog(dir string, retainDays int, skipConfirm bool, displayMode string) error { if retainDays < 0 { return errors.Errorf("retainDays cannot be less than 0") } deleteLog := &deleteAuditLog{ Files: []string{}, Size: 0, Count: 0, } // audit logs before `DelBeforeTime` will be deleted oneDayDuration, _ := time.ParseDuration("-24h") deleteLog.DelBeforeTime = time.Now().Add(oneDayDuration * time.Duration(retainDays)) fileInfos, err := os.ReadDir(dir) if err != nil { return err } for _, f := range fileInfos { if f.IsDir() { continue } t, err := decodeAuditID(f.Name()) if err != nil { continue } if t.Before(deleteLog.DelBeforeTime) { info, err := f.Info() if err != nil { continue } deleteLog.Size += info.Size() deleteLog.Count++ deleteLog.Files = append(deleteLog.Files, filepath.Join(dir, f.Name())) } } // output format json if displayMode == "json" { data, err := json.Marshal(struct { *deleteAuditLog `json:"deleted_logs"` }{deleteLog}) if err != nil { return err } fmt.Println(string(data)) } else { // print table fmt.Printf("Audit logs before %s will be deleted!\nFiles to be %s are:\n %s\nTotal count: %d \nTotal size: %s\n", color.HiYellowString(deleteLog.DelBeforeTime.Format("2006-01-02T15:04:05")), color.HiYellowString("deleted"), strings.Join(deleteLog.Files, "\n "), deleteLog.Count, readableSize(deleteLog.Size), ) if !skipConfirm { if err := tui.PromptForConfirmOrAbortError("Do you want to continue? 
[y/N]:"); err != nil { return err } } } for _, f := range deleteLog.Files { if err := os.Remove(f); err != nil { return err } } if displayMode != "json" { fmt.Println("clean audit log successfully") } return nil } func readableSize(b int64) string { const unit = 1024 if b < unit { return fmt.Sprintf("%d B", b) } div, exp := int64(unit), 0 for n := b / unit; n >= unit; n /= unit { div *= unit exp++ } return fmt.Sprintf("%.2f %cB", float64(b)/float64(div), "kMGTPE"[exp]) } tiup-1.16.3/pkg/cluster/audit/audit_test.go000066400000000000000000000066011505422223000206250ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package audit import ( "fmt" "io" "os" "path" "path/filepath" "runtime" "strings" "testing" "time" "github.com/pingcap/tiup/pkg/base52" "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" ) func currentDir() string { _, file, _, _ := runtime.Caller(0) return filepath.Dir(file) } func auditDir() string { return path.Join(currentDir(), "testdata", "audit") } func resetDir() { _ = os.RemoveAll(auditDir()) _ = os.MkdirAll(auditDir(), 0o777) } func readFakeStdout(f io.ReadSeeker) string { _, _ = f.Seek(0, 0) read, _ := io.ReadAll(f) return string(read) } func TestOutputAuditLog(t *testing.T) { dir := auditDir() resetDir() var g errgroup.Group for range 20 { g.Go(func() error { return OutputAuditLog(dir, "", []byte("audit log")) }) } err := g.Wait() require.NoError(t, err) var paths []string err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if !info.IsDir() { paths = append(paths, path) } return nil }) require.NoError(t, err) require.Equal(t, 20, len(paths)) } func TestShowAuditLog(t *testing.T) { dir := auditDir() resetDir() originStdout := os.Stdout defer func() { os.Stdout = originStdout }() fakeStdout := path.Join(currentDir(), "fake-stdout") defer os.Remove(fakeStdout) openStdout := func() *os.File { _ = os.Remove(fakeStdout) f, err := os.OpenFile(fakeStdout, os.O_CREATE|os.O_RDWR, 0o644) require.NoError(t, err) os.Stdout = f return f } second := int64(1604413577) nanoSecond := int64(1604413624836105381) fname := filepath.Join(dir, base52.Encode(second)) require.NoError(t, os.WriteFile(fname, []byte("test with second"), 0o644)) fname = filepath.Join(dir, base52.Encode(nanoSecond)) require.NoError(t, os.WriteFile(fname, []byte("test with nanosecond"), 0o644)) f := openStdout() require.NoError(t, ShowAuditList(dir)) // tabby table size is based on column width, while time.RFC3339 maybe print out timezone like +08:00 or Z(UTC) // skip the first two lines list := strings.Join(strings.Split(readFakeStdout(f), "\n")[2:], "\n") 
require.Equal(t, fmt.Sprintf(`4F7ZTL %s test with second ftmpqzww84Q %s test with nanosecond `, time.Unix(second, 0).Format(time.RFC3339), time.Unix(nanoSecond/1e9, 0).Format(time.RFC3339), ), list) f.Close() f = openStdout() require.NoError(t, ShowAuditLog(dir, "4F7ZTL")) require.Equal(t, fmt.Sprintf(`--------------------------------------- - OPERATION TIME: %s - --------------------------------------- test with second`, time.Unix(second, 0).Format("2006-01-02T15:04:05")), readFakeStdout(f)) f.Close() f = openStdout() require.NoError(t, ShowAuditLog(dir, "ftmpqzww84Q")) require.Equal(t, fmt.Sprintf(`--------------------------------------- - OPERATION TIME: %s - --------------------------------------- test with nanosecond`, time.Unix(nanoSecond/1e9, 0).Format("2006-01-02T15:04:05")), readFakeStdout(f)) f.Close() } tiup-1.16.3/pkg/cluster/clusterutil/000077500000000000000000000000001505422223000173775ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/clusterutil/cluster.go000066400000000000000000000042771505422223000214210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package clusterutil import ( "os" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" ) // Repository exports interface to tiup-cluster type Repository interface { DownloadComponent(comp, version, target string) error VerifyComponent(comp, version, target string) error ComponentBinEntry(comp, version string) (string, error) } type repositoryT struct { repo repository.Repository } // NewRepository returns repository func NewRepository(os, arch string) (Repository, error) { repo := environment.GlobalEnv().V1Repository().WithOptions(repository.Options{ GOOS: os, GOARCH: arch, DisableDecompress: true, }) return &repositoryT{repo}, nil } func (r *repositoryT) DownloadComponent(comp, version, target string) error { versionItem, err := r.repo.ComponentVersion(comp, version, false) if err != nil { return err } return r.repo.DownloadComponent(versionItem, target) } func (r *repositoryT) VerifyComponent(comp, version, target string) error { versionItem, err := r.repo.ComponentVersion(comp, version, true) if err != nil { return err } file, err := os.Open(target) if err != nil { return err } defer file.Close() if err := utils.CheckSHA256(file, versionItem.Hashes[v1manifest.SHA256]); err != nil { return errors.Errorf("validation failed for %s: %s", target, err) } return nil } func (r *repositoryT) ComponentBinEntry(comp, version string) (string, error) { versionItem, err := r.repo.ComponentVersion(comp, version, true) if err != nil { return "", err } return versionItem.Entry, nil } tiup-1.16.3/pkg/cluster/clusterutil/cluster_name.go000066400000000000000000000027311505422223000224120ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package clusterutil import ( "regexp" "github.com/joomcode/errorx" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) var ( // ErrInvalidClusterName is an error for invalid cluster name. You should use `ValidateClusterNameOrError()` // to generate this error. ErrInvalidClusterName = errorx.CommonErrors.NewType("invalid_cluster_name", utils.ErrTraitPreCheck) ) var ( clusterNameRegexp = regexp.MustCompile(`^[a-zA-Z0-9\-_\.]+$`) ) // ValidateClusterNameOrError validates a cluster name and returns error if the name is invalid. func ValidateClusterNameOrError(n string) error { if len(n) == 0 { return ErrInvalidClusterName. New("Cluster name must not be empty") } if !clusterNameRegexp.MatchString(n) { return ErrInvalidClusterName. New("Cluster name '%s' is invalid", n). WithProperty(tui.SuggestionFromString("The cluster name should only contain alphabets, numbers, hyphen (-), underscore (_), and dot (.).")) } return nil } tiup-1.16.3/pkg/cluster/ctxt/000077500000000000000000000000001505422223000160025ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/ctxt/context.go000066400000000000000000000126741505422223000200270ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package ctxt import ( "context" "runtime" "sync" "time" "github.com/pingcap/tiup/pkg/checkpoint" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/utils/mock" ) type contextKey string const ( ctxKey = contextKey("TASK_CONTEXT") ) const ( // CtxBaseTopo is key of store the base topology in context.Context CtxBaseTopo = contextKey("BASE_TOPO") ) type ( // Executor is the executor interface for TiUP, all tasks will in the end // be passed to an executor and then be actually performed. Executor interface { // Execute run the command, then return its stdout and stderr // NOTE: stdin is not supported as it seems we don't need it (for now). If // at some point in the future we need to pass stdin to a command, we'll // need to refactor this function and its implementations. // If the cmd can't quit in timeout, it will return error, the default timeout is 60 seconds. Execute(ctx context.Context, cmd string, sudo bool, timeout ...time.Duration) (stdout []byte, stderr []byte, err error) // Transfer copies files from or to a target Transfer(ctx context.Context, src, dst string, download bool, limit int, compress bool) error } // ExecutorGetter get the executor by host. ExecutorGetter interface { Get(host string) (e Executor) // GetSSHKeySet gets the SSH private and public key path GetSSHKeySet() (privateKeyPath, publicKeyPath string) } // Context is used to share state while multiple tasks execution. // We should use mutex to prevent concurrent R/W for some fields // because of the same context can be shared in parallel tasks. 
Context struct { mutex sync.RWMutex Ev EventBus exec struct { executors map[string]Executor stdouts map[string][]byte stderrs map[string][]byte checkResults map[string][]any } // The private/public key is used to access remote server via the user `tidb` PrivateKeyPath string PublicKeyPath string Concurrency int // max number of parallel tasks running at the same time } ) // New create a context instance. func New(ctx context.Context, limit int, logger *logprinter.Logger) context.Context { concurrency := runtime.NumCPU() if limit > 0 { concurrency = limit } return context.WithValue( context.WithValue( checkpoint.NewContext(ctx), logprinter.ContextKeyLogger, logger, ), ctxKey, &Context{ mutex: sync.RWMutex{}, Ev: NewEventBus(), exec: struct { executors map[string]Executor stdouts map[string][]byte stderrs map[string][]byte checkResults map[string][]any }{ executors: make(map[string]Executor), stdouts: make(map[string][]byte), stderrs: make(map[string][]byte), checkResults: make(map[string][]any), }, Concurrency: concurrency, // default to CPU count }, ) } // GetInner return *Context from context.Context's value func GetInner(ctx context.Context) *Context { return ctx.Value(ctxKey).(*Context) } // Get implements the operation.ExecutorGetter interface. func (ctx *Context) Get(host string) (e Executor) { ctx.mutex.Lock() e, ok := ctx.exec.executors[host] ctx.mutex.Unlock() if !ok { panic("no init executor for " + host) } return } // GetSSHKeySet implements the operation.ExecutorGetter interface. func (ctx *Context) GetSSHKeySet() (privateKeyPath, publicKeyPath string) { return ctx.PrivateKeyPath, ctx.PublicKeyPath } // GetExecutor get the executor. func (ctx *Context) GetExecutor(host string) (e Executor, ok bool) { // Mock point for unit test if e := mock.On("FakeExecutor"); e != nil { return e.(Executor), true } ctx.mutex.RLock() e, ok = ctx.exec.executors[host] ctx.mutex.RUnlock() return } // SetExecutor set the executor. 
func (ctx *Context) SetExecutor(host string, e Executor) { ctx.mutex.Lock() if e != nil { ctx.exec.executors[host] = e } else { delete(ctx.exec.executors, host) } ctx.mutex.Unlock() } // GetOutputs get the outputs of a host (if has any) func (ctx *Context) GetOutputs(hostID string) ([]byte, []byte, bool) { ctx.mutex.RLock() stdout, ok1 := ctx.exec.stdouts[hostID] stderr, ok2 := ctx.exec.stderrs[hostID] ctx.mutex.RUnlock() return stdout, stderr, ok1 && ok2 } // SetOutputs set the outputs of a host func (ctx *Context) SetOutputs(hostID string, stdout []byte, stderr []byte) { ctx.mutex.Lock() ctx.exec.stdouts[hostID] = stdout ctx.exec.stderrs[hostID] = stderr ctx.mutex.Unlock() } // GetCheckResults get the the check result of a host (if has any) func (ctx *Context) GetCheckResults(host string) (results []any, ok bool) { ctx.mutex.RLock() results, ok = ctx.exec.checkResults[host] ctx.mutex.RUnlock() return } // SetCheckResults append the check result of a host to the list func (ctx *Context) SetCheckResults(host string, results []any) { ctx.mutex.Lock() if currResult, ok := ctx.exec.checkResults[host]; ok { ctx.exec.checkResults[host] = append(currResult, results...) } else { ctx.exec.checkResults[host] = results } ctx.mutex.Unlock() } tiup-1.16.3/pkg/cluster/ctxt/event_bus.go000066400000000000000000000046651505422223000203360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package ctxt import ( "fmt" ev "github.com/asaskevich/EventBus" "go.uber.org/zap" ) // EventBus is an event bus for task events. type EventBus struct { eventBus ev.Bus } // EventKind is the task event kind. type EventKind string const ( // EventTaskBegin is emitted when a task is going to be executed. EventTaskBegin EventKind = "task_begin" // EventTaskFinish is emitted when a task finishes executing. EventTaskFinish EventKind = "task_finish" // EventTaskProgress is emitted when a task has made some progress. EventTaskProgress EventKind = "task_progress" ) // NewEventBus creates a new EventBus. func NewEventBus() EventBus { return EventBus{ eventBus: ev.New(), } } // PublishTaskBegin publishes a TaskBegin event. This should be called only by Parallel or Serial. func (ev *EventBus) PublishTaskBegin(task fmt.Stringer) { zap.L().Debug("TaskBegin", zap.String("task", task.String())) ev.eventBus.Publish(string(EventTaskBegin), task) } // PublishTaskFinish publishes a TaskFinish event. This should be called only by Parallel or Serial. func (ev *EventBus) PublishTaskFinish(task fmt.Stringer, err error) { zap.L().Debug("TaskFinish", zap.String("task", task.String()), zap.Error(err)) ev.eventBus.Publish(string(EventTaskFinish), task, err) } // PublishTaskProgress publishes a TaskProgress event. func (ev *EventBus) PublishTaskProgress(task fmt.Stringer, progress string) { zap.L().Debug("TaskProgress", zap.String("task", task.String()), zap.String("progress", progress)) ev.eventBus.Publish(string(EventTaskProgress), task, progress) } // Subscribe subscribes events. func (ev *EventBus) Subscribe(eventName EventKind, handler any) { err := ev.eventBus.Subscribe(string(eventName), handler) if err != nil { panic(err) } } // Unsubscribe unsubscribes events. 
func (ev *EventBus) Unsubscribe(eventName EventKind, handler any) { err := ev.eventBus.Unsubscribe(string(eventName), handler) if err != nil { panic(err) } } tiup-1.16.3/pkg/cluster/executor/000077500000000000000000000000001505422223000166565ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/executor/checkpoint.go000066400000000000000000000065761505422223000213520ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package executor import ( "context" "fmt" "reflect" "time" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/ctxt" "go.uber.org/zap" ) var ( // register checkpoint for ssh command sshPoint = checkpoint.Register( checkpoint.Field("host", reflect.DeepEqual), checkpoint.Field("port", func(a, b any) bool { return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", b) }), checkpoint.Field("user", reflect.DeepEqual), checkpoint.Field("sudo", reflect.DeepEqual), checkpoint.Field("cmd", reflect.DeepEqual), ) // register checkpoint for scp command scpPoint = checkpoint.Register( checkpoint.Field("host", reflect.DeepEqual), checkpoint.Field("port", func(a, b any) bool { return fmt.Sprintf("%v", a) == fmt.Sprintf("%v", b) }), checkpoint.Field("user", reflect.DeepEqual), checkpoint.Field("src", reflect.DeepEqual), checkpoint.Field("dst", reflect.DeepEqual), checkpoint.Field("download", reflect.DeepEqual), ) ) // CheckPointExecutor wraps Executor and inject checkpoints // // ATTENTION please: the result of CheckPointExecutor shouldn't be used to impact // external system 
like PD, otherwise, the external system may // take wrong action. type CheckPointExecutor struct { ctxt.Executor config *SSHConfig } // Execute implements Executor interface. func (c *CheckPointExecutor) Execute(ctx context.Context, cmd string, sudo bool, timeout ...time.Duration) (stdout []byte, stderr []byte, err error) { point := checkpoint.Acquire(ctx, sshPoint, map[string]any{ "host": c.config.Host, "port": c.config.Port, "user": c.config.User, "sudo": sudo, "cmd": cmd, }) defer func() { point.Release(err, zap.String("host", c.config.Host), zap.Int("port", c.config.Port), zap.String("user", c.config.User), zap.Bool("sudo", sudo), zap.String("cmd", cmd), zap.String("stdout", string(stdout)), zap.String("stderr", string(stderr)), ) }() if point.Hit() != nil { return []byte(point.Hit()["stdout"].(string)), []byte(point.Hit()["stderr"].(string)), nil } return c.Executor.Execute(ctx, cmd, sudo, timeout...) } // Transfer implements Executer interface. func (c *CheckPointExecutor) Transfer(ctx context.Context, src, dst string, download bool, limit int, compress bool) (err error) { point := checkpoint.Acquire(ctx, scpPoint, map[string]any{ "host": c.config.Host, "port": c.config.Port, "user": c.config.User, "src": src, "dst": dst, "download": download, "limit": limit, "compress": compress, }) defer func() { point.Release(err, zap.String("host", c.config.Host), zap.Int("port", c.config.Port), zap.String("user", c.config.User), zap.String("src", src), zap.String("dst", dst), zap.Bool("download", download)) }() if point.Hit() != nil { return nil } return c.Executor.Transfer(ctx, src, dst, download, limit, compress) } tiup-1.16.3/pkg/cluster/executor/executor.go000066400000000000000000000123351505422223000210470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package executor import ( "context" "fmt" "net" "os" "strings" "time" "github.com/joomcode/errorx" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/localdata" ) // SSHType represent the type of the channel used by ssh type SSHType string var ( errNS = errorx.NewNamespace("executor") // SSHTypeBuiltin is the type of easy ssh executor SSHTypeBuiltin SSHType = "builtin" // SSHTypeSystem is the type of host ssh client SSHTypeSystem SSHType = "system" // SSHTypeNone is the type of local executor (no ssh will be used) SSHTypeNone SSHType = "none" executeDefaultTimeout = time.Second * 60 // This command will be execute once the NativeSSHExecutor is created. // It's used to predict if the connection can establish success in the future. // Its main purpose is to avoid sshpass hang when user specified a wrong prompt. connectionTestCommand = "echo connection test, if killed, check the password prompt" // SSH authorized_keys file defaultSSHAuthorizedKeys = "~/.ssh/authorized_keys" ) // New create a new Executor func New(etype SSHType, sudo bool, c SSHConfig) (ctxt.Executor, error) { if etype == "" { etype = SSHTypeBuiltin } // Used in integration testing, to check if native ssh client is really used when it need to be. 
failpoint.Inject("assertNativeSSH", func() { // XXX: We call system executor 'native' by mistake in commit f1142b1 // this should be fixed after we remove --native-ssh flag if etype != SSHTypeSystem { msg := fmt.Sprintf( "native ssh client should be used in this case, os.Args: %s, %s = %s", os.Args, localdata.EnvNameNativeSSHClient, os.Getenv(localdata.EnvNameNativeSSHClient), ) panic(msg) } }) // set default values if c.Port <= 0 { c.Port = 22 } if c.Timeout == 0 { c.Timeout = time.Second * 5 // default timeout is 5 sec } var executor ctxt.Executor switch etype { case SSHTypeBuiltin: e := &EasySSHExecutor{ Locale: "C", Sudo: sudo, } e.initialize(c) executor = e case SSHTypeSystem: e := &NativeSSHExecutor{ Config: &c, Locale: "C", Sudo: sudo, } if c.Password != "" || (c.KeyFile != "" && c.Passphrase != "") { _, _, e.ConnectionTestResult = e.Execute(context.Background(), connectionTestCommand, false, executeDefaultTimeout) } executor = e case SSHTypeNone: if err := checkLocalIP(c.Host); err != nil { return nil, err } e := &Local{ Config: &c, Sudo: sudo, Locale: "C", } executor = e default: return nil, errors.Errorf("unregistered executor: %s", etype) } return &CheckPointExecutor{executor, &c}, nil } // UnwarpCheckPointExecutor unwarp the CheckPointExecutor and return the real executor // // Sometimes we just want to get the output of a command, and the CheckPointExecutor will // always cache the output, it will be a problem when we want to get the real output. 
func UnwarpCheckPointExecutor(e ctxt.Executor) ctxt.Executor { switch e := e.(type) { case *CheckPointExecutor: return e.Executor default: return e } } func checkLocalIP(ip string) error { ifaces, err := net.Interfaces() if err != nil { return errors.AddStack(err) } foundIps := []string{} for _, i := range ifaces { addrs, err := i.Addrs() if err != nil { continue } for _, addr := range addrs { switch v := addr.(type) { case *net.IPNet: if ip == v.IP.String() { return nil } foundIps = append(foundIps, v.IP.String()) case *net.IPAddr: if ip == v.IP.String() { return nil } foundIps = append(foundIps, v.IP.String()) } } } return fmt.Errorf("address %s not found in all interfaces, found ips: %s", ip, strings.Join(foundIps, ",")) } // FindSSHAuthorizedKeysFile finds the correct path of SSH authorized keys file func FindSSHAuthorizedKeysFile(ctx context.Context, exec ctxt.Executor) string { // detect if custom path of authorized keys file is set // NOTE: we do not yet support: // - custom config for user (~/.ssh/config) // - sshd started with custom config (other than /etc/ssh/sshd_config) // - ssh server implementations other than OpenSSH (such as dropbear) sshAuthorizedKeys := defaultSSHAuthorizedKeys cmd := "grep -Ev '^\\s*#|^\\s*$' /etc/ssh/sshd_config" stdout, _, _ := exec.Execute(ctx, cmd, true) // error ignored as we have default value for line := range strings.SplitSeq(string(stdout), "\n") { if !strings.Contains(line, "AuthorizedKeysFile") { continue } fields := strings.Fields(line) if len(fields) >= 2 { sshAuthorizedKeys = fields[1] break } } if !strings.HasPrefix(sshAuthorizedKeys, "/") && !strings.HasPrefix(sshAuthorizedKeys, "~") { sshAuthorizedKeys = fmt.Sprintf("~/%s", sshAuthorizedKeys) } return sshAuthorizedKeys } tiup-1.16.3/pkg/cluster/executor/local.go000066400000000000000000000107361505422223000203060ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package executor import ( "bytes" "context" "fmt" "os/exec" "os/user" "path/filepath" "strings" "time" "github.com/fatih/color" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" ) // Local execute the command at local host. type Local struct { Config *SSHConfig Sudo bool // all commands run with this executor will be using sudo Locale string // the locale used when executing the command } var _ ctxt.Executor = &Local{} // Execute implements Executor interface. 
func (l *Local) Execute(ctx context.Context, cmd string, sudo bool, timeout ...time.Duration) ([]byte, []byte, error) { // change wd to default home cmd = fmt.Sprintf("cd; %s", cmd) // get current user name user, err := user.Current() if err != nil { return nil, nil, err } // try to acquire root permission if l.Sudo || sudo { cmd = fmt.Sprintf("/usr/bin/sudo -H -u root bash -c \"%s\"", strings.ReplaceAll(cmd, "\"", "\\\"")) } else if l.Config.User != user.Name { cmd = fmt.Sprintf("/usr/bin/sudo -H -u %s bash -c \"%s\"", l.Config.User, strings.ReplaceAll(cmd, "\"", "\\\"")) } // set a basic PATH in case it's empty on login cmd = fmt.Sprintf("PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin %s", cmd) if l.Locale != "" { cmd = fmt.Sprintf("export LANG=%s; %s", l.Locale, cmd) } // run command on remote host // default timeout is 60s in easyssh-proxy if len(timeout) == 0 { timeout = append(timeout, executeDefaultTimeout) } if len(timeout) > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, timeout[0]) defer cancel() } command := exec.CommandContext(ctx, "/bin/bash", "-c", cmd) stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) command.Stdout = stdout command.Stderr = stderr err = command.Run() zap.L().Info("LocalCommand", zap.String("cmd", cmd), zap.Error(err), zap.String("stdout", stdout.String()), zap.String("stderr", stderr.String())) if err != nil { baseErr := ErrSSHExecuteFailed. Wrap(err, "Failed to execute command locally"). WithProperty(ErrPropSSHCommand, cmd). WithProperty(ErrPropSSHStdout, stdout). WithProperty(ErrPropSSHStderr, stderr) if len(stdout.Bytes()) > 0 || len(stderr.Bytes()) > 0 { output := strings.TrimSpace(strings.Join([]string{stdout.String(), stderr.String()}, "\n")) baseErr = baseErr. WithProperty(tui.SuggestionFromFormat("Command output:\n%s\n", color.YellowString(output))) } return stdout.Bytes(), stderr.Bytes(), baseErr } return stdout.Bytes(), stderr.Bytes(), err } // Transfer implements Executer interface. 
func (l *Local) Transfer(ctx context.Context, src, dst string, download bool, limit int, _ bool) error { targetPath := filepath.Dir(dst) if err := utils.MkdirAll(targetPath, 0755); err != nil { return err } cmd := "" user, err := user.Current() if err != nil { return err } if download || user.Username == l.Config.User { cmd = fmt.Sprintf("cp %s %s", src, dst) } else { cmd = fmt.Sprintf("/usr/bin/sudo -H -u root bash -c \"cp %[1]s %[2]s && chown %[3]s:$(id -g -n %[3]s) %[2]s\"", src, dst, l.Config.User) } command := exec.Command("/bin/bash", "-c", cmd) stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) command.Stdout = stdout command.Stderr = stderr err = command.Run() zap.L().Info("CPCommand", zap.String("cmd", cmd), zap.Error(err), zap.String("stdout", stdout.String()), zap.String("stderr", stderr.String())) if err != nil { baseErr := ErrSSHExecuteFailed. Wrap(err, "Failed to transfer file over local cp"). WithProperty(ErrPropSSHCommand, cmd). WithProperty(ErrPropSSHStdout, stdout). WithProperty(ErrPropSSHStderr, stderr) if len(stdout.Bytes()) > 0 || len(stderr.Bytes()) > 0 { output := strings.TrimSpace(strings.Join([]string{stdout.String(), stderr.String()}, "\n")) baseErr = baseErr. WithProperty(tui.SuggestionFromFormat("Command output:\n%s\n", color.YellowString(output))) } return baseErr } return err } tiup-1.16.3/pkg/cluster/executor/local_test.go000066400000000000000000000051351505422223000213420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package executor import ( "context" "fmt" "os" "os/user" "testing" "github.com/pingcap/tiup/pkg/cluster/ctxt" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/stretchr/testify/require" ) func TestLocal(t *testing.T) { ctx := ctxt.New(context.Background(), 0, logprinter.NewLogger("")) assert := require.New(t) user, err := user.Current() assert.Nil(err) local, err := New(SSHTypeNone, false, SSHConfig{Host: "127.0.0.1", User: user.Username}) assert.Nil(err) _, _, err = local.Execute(ctx, "ls .", false) assert.Nil(err) // generate a src file and write some data src, err := os.CreateTemp("", "") assert.Nil(err) defer os.Remove(src.Name()) n, err := src.WriteString("src") assert.Nil(err) assert.Equal(3, n) err = src.Close() assert.Nil(err) // generate a dst file and just close it. dst, err := os.CreateTemp("", "") assert.Nil(err) err = dst.Close() assert.Nil(err) defer os.Remove(dst.Name()) // Transfer src to dst and check it. err = local.Transfer(ctx, src.Name(), dst.Name(), false, 0, false) assert.Nil(err) data, err := os.ReadFile(dst.Name()) assert.Nil(err) assert.Equal("src", string(data)) } func TestWrongIP(t *testing.T) { assert := require.New(t) user, err := user.Current() assert.Nil(err) _, err = New(SSHTypeNone, false, SSHConfig{Host: "127.0.0.2", User: user.Username}) assert.NotNil(err) assert.Contains(err.Error(), "not found") } func TestLocalExecuteWithQuotes(t *testing.T) { ctx := ctxt.New(context.Background(), 0, logprinter.NewLogger("")) assert := require.New(t) user, err := user.Current() assert.Nil(err) local, err := New(SSHTypeNone, false, SSHConfig{Host: "127.0.0.1", User: user.Username}) assert.Nil(err) deployDir, err := os.MkdirTemp("", "tiup-*") assert.Nil(err) defer os.RemoveAll(deployDir) cmds := []string{ fmt.Sprintf(`find %s -type f -exec sed -i 's/\${DS_.*-CLUSTER}/hello/g' {} \;`, deployDir), fmt.Sprintf(`find %s -type f -exec sed -i 's/DS_.*-CLUSTER/hello/g' {} \;`, deployDir), `ls '/tmp'`, } for _, cmd := range cmds { _, 
_, err = local.Execute(ctx, cmd, false) assert.Nil(err) } } tiup-1.16.3/pkg/cluster/executor/scp.go000066400000000000000000000104421505422223000177730ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package executor import ( "bufio" "fmt" "io" "io/fs" "os" "path/filepath" "strconv" "strings" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/crypto/ssh" ) // ScpDownload downloads a file from remote with SCP // The implementation is partially inspired by github.com/dtylman/scp // Do not support pattern, src and dst must be dir or file path func ScpDownload(session *ssh.Session, client *ssh.Client, src, dst string, limit int, compress bool) error { r, err := session.StdoutPipe() if err != nil { return err } bufr := bufio.NewReader(r) w, err := session.StdinPipe() if err != nil { return err } remoteArgs := make([]string, 0) if compress { remoteArgs = append(remoteArgs, "-C") } if limit > 0 { remoteArgs = append(remoteArgs, fmt.Sprintf("-l %d", limit)) } remoteCmd := fmt.Sprintf("scp -r %s -f %s", strings.Join(remoteArgs, " "), src) err = session.Start(remoteCmd) if err != nil { return err } if err := ack(w); err != nil { // send an empty byte to start transfer return err } wd := dst for firstCommand := true; ; firstCommand = false { // parse scp command line, err := bufr.ReadString('\n') if err == io.EOF { break } else if err != nil { return err } switch line[0] { // ignore ACK from server case '\x00': line = line[1:] case '\x01': return fmt.Errorf("scp warning: %s", line[1:]) case 
'\x02': return fmt.Errorf("scp error: %s", line[1:]) } switch line[0] { case 'C': mode, size, name, err := parseLine(line) if err != nil { return err } fp := filepath.Join(wd, name) // first scp command is 'C' means src is a single file if firstCommand { fp = dst } targetFile, err := os.OpenFile(fp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode|0700) if err != nil { return err } defer targetFile.Close() if err := ack(w); err != nil { return err } // transferring data n, err := io.CopyN(targetFile, bufr, size) if err != nil { return err } if n < size { return fmt.Errorf("error downloading via scp, file size mismatch") } if err := targetFile.Sync(); err != nil { return err } case 'D': mode, _, name, err := parseLine(line) if err != nil { return err } // normally, workdir is like this wd = filepath.Join(wd, name) // first scp command is 'D' means src is a dir if firstCommand { fi, err := os.Stat(dst) if err != nil && !os.IsNotExist(err) { return err } else if err == nil && !fi.IsDir() { return fmt.Errorf("%s cannot be an exist file", wd) } else if os.IsNotExist(err) { // dst is not exist, so dst is the target dir wd = dst } else { // dst is exist, dst/name is the target dir break } } err = utils.MkdirAll(wd, mode) if err != nil { return err } case 'E': wd = filepath.Dir(wd) default: return fmt.Errorf("incorrect scp command '%b', should be 'C', 'D' or 'E'", line[0]) } err = ack(w) if err != nil { return err } } return session.Wait() } func ack(w io.Writer) error { msg := []byte("\x00") n, err := w.Write(msg) if err != nil { return fmt.Errorf("fail to send response to remote: %s", err) } if n < len(msg) { return fmt.Errorf("fail to send response to remote, size mismatch") } return nil } func parseLine(line string) (mode fs.FileMode, size int64, name string, err error) { words := strings.Fields(strings.TrimSuffix(line, "\n")) if len(words) < 3 { return 0, 0, "", fmt.Errorf("incorrect scp command param number: %d", len(words)) } modeN, err := strconv.ParseUint(words[0][1:], 0, 
32) if err != nil { return 0, 0, "", fmt.Errorf("error parsing file mode; %s", err) } mode = fs.FileMode(modeN) size, err = strconv.ParseInt(words[1], 10, 64) if err != nil { return 0, 0, "", err } name = words[2] return } tiup-1.16.3/pkg/cluster/executor/ssh.go000066400000000000000000000330771505422223000200140ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package executor import ( "bytes" "context" "fmt" "os" "os/exec" "path/filepath" "strconv" "strings" "time" "github.com/appleboy/easyssh-proxy" "github.com/fatih/color" "github.com/joomcode/errorx" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" ) var ( errNSSSH = errNS.NewSubNamespace("ssh") // ErrPropSSHCommand is ErrPropSSHCommand ErrPropSSHCommand = errorx.RegisterPrintableProperty("ssh_command") // ErrPropSSHStdout is ErrPropSSHStdout ErrPropSSHStdout = errorx.RegisterPrintableProperty("ssh_stdout") // ErrPropSSHStderr is ErrPropSSHStderr ErrPropSSHStderr = errorx.RegisterPrintableProperty("ssh_stderr") // ErrSSHExecuteFailed is ErrSSHExecuteFailed ErrSSHExecuteFailed = errNSSSH.NewType("execute_failed") // ErrSSHExecuteTimedout is ErrSSHExecuteTimedout ErrSSHExecuteTimedout = errNSSSH.NewType("execute_timedout") ) func init() { v := os.Getenv("TIUP_CLUSTER_EXECUTE_DEFAULT_TIMEOUT") if v != "" { d, err := time.ParseDuration(v) if err != nil { fmt.Println("ignore invalid 
TIUP_CLUSTER_EXECUTE_DEFAULT_TIMEOUT: ", v) return } executeDefaultTimeout = d } } type ( // EasySSHExecutor implements Executor with EasySSH as transportation layer. EasySSHExecutor struct { Config *easyssh.MakeConfig Locale string // the locale used when executing the command Sudo bool // all commands run with this executor will be using sudo } // NativeSSHExecutor implements Excutor with native SSH transportation layer. NativeSSHExecutor struct { Config *SSHConfig Locale string // the locale used when executing the command Sudo bool // all commands run with this executor will be using sudo ConnectionTestResult error // test if the connection can be established in initialization phase } // SSHConfig is the configuration needed to establish SSH connection. SSHConfig struct { Host string // hostname of the SSH server Port int // port of the SSH server User string // username to login to the SSH server Password string // password of the user KeyFile string // path to the private key file Passphrase string // passphrase of the private key file Timeout time.Duration // Timeout is the maximum amount of time for the TCP connection to establish. 
ExeTimeout time.Duration // ExeTimeout is the maximum amount of time for the command to finish Proxy *SSHConfig // ssh proxy config } ) var _ ctxt.Executor = &EasySSHExecutor{} var _ ctxt.Executor = &NativeSSHExecutor{} // initialize builds and initializes a EasySSHExecutor func (e *EasySSHExecutor) initialize(config SSHConfig) { // build easyssh config e.Config = &easyssh.MakeConfig{ Server: config.Host, Port: strconv.Itoa(config.Port), User: config.User, Timeout: config.Timeout, // timeout when connecting to remote } if config.ExeTimeout > 0 { executeDefaultTimeout = config.ExeTimeout } // prefer private key authentication if len(config.KeyFile) > 0 { e.Config.KeyPath = config.KeyFile e.Config.Passphrase = config.Passphrase } else if len(config.Password) > 0 { e.Config.Password = config.Password } if proxy := config.Proxy; proxy != nil { e.Config.Proxy = easyssh.DefaultConfig{ Server: proxy.Host, Port: strconv.Itoa(proxy.Port), User: proxy.User, Timeout: proxy.Timeout, // timeout when connecting to remote } if len(proxy.KeyFile) > 0 { e.Config.Proxy.KeyPath = proxy.KeyFile e.Config.Proxy.Passphrase = proxy.Passphrase } else if len(proxy.Password) > 0 { e.Config.Proxy.Password = proxy.Password } } } // Execute run the command via SSH, it's not invoking any specific shell by default. func (e *EasySSHExecutor) Execute(ctx context.Context, cmd string, sudo bool, timeout ...time.Duration) ([]byte, []byte, error) { // try to acquire root permission if e.Sudo || sudo { cmd = fmt.Sprintf("/usr/bin/sudo -H bash -c \"%s\"", cmd) } // set a basic PATH in case it's empty on login cmd = fmt.Sprintf("PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin; %s", cmd) if e.Locale != "" { cmd = fmt.Sprintf("export LANG=%s; %s", e.Locale, cmd) } // run command on remote host // default timeout is 60s in easyssh-proxy if len(timeout) == 0 { timeout = append(timeout, executeDefaultTimeout) } stdout, stderr, done, err := e.Config.Run(cmd, timeout...) 
logfn := zap.L().Info if err != nil { logfn = zap.L().Error } logfn("SSHCommand", zap.String("host", e.Config.Server), zap.String("port", e.Config.Port), zap.String("cmd", cmd), zap.Error(err), zap.String("stdout", stdout), zap.String("stderr", stderr)) if err != nil { baseErr := ErrSSHExecuteFailed. Wrap(err, "Failed to execute command over SSH for '%s@%s:%s'", e.Config.User, e.Config.Server, e.Config.Port). WithProperty(ErrPropSSHCommand, cmd). WithProperty(ErrPropSSHStdout, stdout). WithProperty(ErrPropSSHStderr, stderr) if len(stdout) > 0 || len(stderr) > 0 { output := strings.TrimSpace(strings.Join([]string{stdout, stderr}, "\n")) baseErr = baseErr. WithProperty(tui.SuggestionFromFormat("Command output on remote host %s:\n%s\n", e.Config.Server, color.YellowString(output))) } return []byte(stdout), []byte(stderr), baseErr } if !done { // timeout case, return []byte(stdout), []byte(stderr), ErrSSHExecuteTimedout. Wrap(err, "Execute command over SSH timedout for '%s@%s:%s'", e.Config.User, e.Config.Server, e.Config.Port). WithProperty(ErrPropSSHCommand, cmd). WithProperty(ErrPropSSHStdout, stdout). WithProperty(ErrPropSSHStderr, stderr) } return []byte(stdout), []byte(stderr), nil } // Transfer copies files via SCP // This function depends on `scp` (a tool from OpenSSH or other SSH implementation) // This function is based on easyssh.MakeConfig.Scp() but with support of copying // file from remote to local. 
func (e *EasySSHExecutor) Transfer(ctx context.Context, src, dst string, download bool, limit int, compress bool) error { if !download { err := e.Config.Scp(src, dst) if err != nil { return errors.Annotatef(err, "failed to scp %s to %s@%s:%s", src, e.Config.User, e.Config.Server, dst) } return nil } // download file from remote session, client, err := e.Config.Connect() if err != nil { return err } defer client.Close() defer session.Close() err = utils.MkdirAll(filepath.Dir(dst), 0755) if err != nil { return nil } return ScpDownload(session, client, src, dst, limit, compress) } func (e *NativeSSHExecutor) prompt(def string) string { if prom := os.Getenv(localdata.EnvNameSSHPassPrompt); prom != "" { return prom } return def } func (e *NativeSSHExecutor) configArgs(args []string, isScp bool) []string { if e.Config.Port != 0 && e.Config.Port != 22 { if isScp { args = append(args, "-P", strconv.Itoa(e.Config.Port)) } else { args = append(args, "-p", strconv.Itoa(e.Config.Port)) } } if e.Config.Timeout != 0 { args = append(args, "-o", fmt.Sprintf("ConnectTimeout=%d", int64(e.Config.Timeout.Seconds()))) } if e.Config.Password != "" { args = append([]string{"sshpass", "-p", e.Config.Password, "-P", e.prompt("password")}, args...) } else if e.Config.KeyFile != "" { args = append(args, "-i", e.Config.KeyFile) if e.Config.Passphrase != "" { args = append([]string{"sshpass", "-p", e.Config.Passphrase, "-P", e.prompt("passphrase")}, args...) } } proxy := e.Config.Proxy if proxy != nil { proxyArgs := []string{"ssh"} if proxy.Timeout != 0 { proxyArgs = append(proxyArgs, "-o", fmt.Sprintf("ConnectTimeout=%d", int64(proxy.Timeout.Seconds()))) } if proxy.Password != "" { proxyArgs = append([]string{"sshpass", "-p", proxy.Password, "-P", e.prompt("password")}, proxyArgs...) 
} else if proxy.KeyFile != "" { proxyArgs = append(proxyArgs, "-i", proxy.KeyFile) if proxy.Passphrase != "" { proxyArgs = append([]string{"sshpass", "-p", proxy.Passphrase, "-P", e.prompt("passphrase")}, proxyArgs...) } } // Don't need to extra quote it, exec.Command will handle it right // ref https://stackoverflow.com/a/26473771/2298986 args = append(args, []string{"-o", fmt.Sprintf(`ProxyCommand=%s %s@%s -p %d -W %%h:%%p`, strings.Join(proxyArgs, " "), proxy.User, proxy.Host, proxy.Port)}...) } return args } // Execute run the command via SSH, it's not invoking any specific shell by default. func (e *NativeSSHExecutor) Execute(ctx context.Context, cmd string, sudo bool, timeout ...time.Duration) ([]byte, []byte, error) { if e.ConnectionTestResult != nil { return nil, nil, e.ConnectionTestResult } // try to acquire root permission if e.Sudo || sudo { cmd = fmt.Sprintf("/usr/bin/sudo -H bash -c \"%s\"", cmd) } // set a basic PATH in case it's empty on login cmd = fmt.Sprintf("PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin %s", cmd) if e.Locale != "" { cmd = fmt.Sprintf("export LANG=%s; %s", e.Locale, cmd) } // run command on remote host // default timeout is 60s in easyssh-proxy if len(timeout) == 0 { timeout = append(timeout, executeDefaultTimeout) } if len(timeout) > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, timeout[0]) defer cancel() } ssh := "ssh" if val := os.Getenv(localdata.EnvNameSSHPath); val != "" { if isExec := utils.IsExecBinary(val); !isExec { return nil, nil, fmt.Errorf("specified SSH in the environment variable `%s` does not exist or is not executable", localdata.EnvNameSSHPath) } ssh = val } args := []string{ssh, "-o", "StrictHostKeyChecking=no"} args = e.configArgs(args, false) // prefix and postfix args args = append(args, fmt.Sprintf("%s@%s", e.Config.User, e.Config.Host), cmd) command := exec.CommandContext(ctx, args[0], args[1:]...) 
stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) command.Stdout = stdout command.Stderr = stderr err := command.Run() logfn := zap.L().Info if err != nil { logfn = zap.L().Error } logfn("SSHCommand", zap.String("host", e.Config.Host), zap.Int("port", e.Config.Port), zap.String("cmd", cmd), zap.Error(err), zap.String("stdout", stdout.String()), zap.String("stderr", stderr.String())) if err != nil { baseErr := ErrSSHExecuteFailed. Wrap(err, "Failed to execute command over SSH for '%s@%s:%d'", e.Config.User, e.Config.Host, e.Config.Port). WithProperty(ErrPropSSHCommand, cmd). WithProperty(ErrPropSSHStdout, stdout). WithProperty(ErrPropSSHStderr, stderr) if len(stdout.Bytes()) > 0 || len(stderr.Bytes()) > 0 { output := strings.TrimSpace(strings.Join([]string{stdout.String(), stderr.String()}, "\n")) baseErr = baseErr. WithProperty(tui.SuggestionFromFormat("Command output on remote host %s:\n%s\n", e.Config.Host, color.YellowString(output))) } return stdout.Bytes(), stderr.Bytes(), baseErr } return stdout.Bytes(), stderr.Bytes(), err } // Transfer copies files via SCP // This function depends on `scp` (a tool from OpenSSH or other SSH implementation) func (e *NativeSSHExecutor) Transfer(ctx context.Context, src, dst string, download bool, limit int, compress bool) error { if e.ConnectionTestResult != nil { return e.ConnectionTestResult } scp := "scp" if val := os.Getenv(localdata.EnvNameSCPPath); val != "" { if isExec := utils.IsExecBinary(val); !isExec { return fmt.Errorf("specified SCP in the environment variable `%s` does not exist or is not executable", localdata.EnvNameSCPPath) } scp = val } args := []string{scp, "-r", "-o", "StrictHostKeyChecking=no"} if limit > 0 { args = append(args, "-l", fmt.Sprint(limit)) } if compress { args = append(args, "-C") } args = e.configArgs(args, true) // prefix and postfix args if download { targetPath := filepath.Dir(dst) if err := utils.MkdirAll(targetPath, 0755); err != nil { return err } args = append(args, 
fmt.Sprintf("%s@%s:%s", e.Config.User, e.Config.Host, src), dst) } else { args = append(args, src, fmt.Sprintf("%s@%s:%s", e.Config.User, e.Config.Host, dst)) } command := exec.Command(args[0], args[1:]...) stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) command.Stdout = stdout command.Stderr = stderr err := command.Run() logfn := zap.L().Info if err != nil { logfn = zap.L().Error } logfn("SCPCommand", zap.String("host", e.Config.Host), zap.Int("port", e.Config.Port), zap.String("cmd", strings.Join(args, " ")), zap.Error(err), zap.String("stdout", stdout.String()), zap.String("stderr", stderr.String())) if err != nil { baseErr := ErrSSHExecuteFailed. Wrap(err, "Failed to transfer file over SCP for '%s@%s:%d'", e.Config.User, e.Config.Host, e.Config.Port). WithProperty(ErrPropSSHCommand, strings.Join(args, " ")). WithProperty(ErrPropSSHStdout, stdout). WithProperty(ErrPropSSHStderr, stderr) if len(stdout.Bytes()) > 0 || len(stderr.Bytes()) > 0 { output := strings.TrimSpace(strings.Join([]string{stdout.String(), stderr.String()}, "\n")) baseErr = baseErr. WithProperty(tui.SuggestionFromFormat("Command output on remote host %s:\n%s\n", e.Config.Host, color.YellowString(output))) } return baseErr } return err } tiup-1.16.3/pkg/cluster/executor/ssh_test.go000066400000000000000000000056131505422223000210460ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package executor import ( "strings" "testing" "time" "github.com/stretchr/testify/assert" ) func TestNativeSSHConfigArgs(t *testing.T) { testcases := []struct { c *SSHConfig s bool e string }{ { &SSHConfig{ KeyFile: "id_rsa", }, false, "-i id_rsa", }, { &SSHConfig{ Timeout: 60 * time.Second, Port: 23, KeyFile: "id_rsa", }, false, "-p 23 -o ConnectTimeout=60 -i id_rsa", }, { &SSHConfig{ Timeout: 60 * time.Second, Port: 23, KeyFile: "id_rsa", }, true, "-P 23 -o ConnectTimeout=60 -i id_rsa", }, { &SSHConfig{ Timeout: 60 * time.Second, KeyFile: "id_rsa", Port: 23, Passphrase: "tidb", }, false, "sshpass -p tidb -P passphrase -p 23 -o ConnectTimeout=60 -i id_rsa", }, { &SSHConfig{ Timeout: 60 * time.Second, KeyFile: "id_rsa", Port: 23, Passphrase: "tidb", }, true, "sshpass -p tidb -P passphrase -P 23 -o ConnectTimeout=60 -i id_rsa", }, { &SSHConfig{ Timeout: 60 * time.Second, Password: "tidb", }, true, "sshpass -p tidb -P password -o ConnectTimeout=60", }, { &SSHConfig{ Timeout: 60 * time.Second, KeyFile: "id_rsa", Proxy: &SSHConfig{ User: "root", Host: "proxy1", Port: 222, KeyFile: "b.id_rsa", }, }, false, "-o ConnectTimeout=60 -i id_rsa -o ProxyCommand=ssh -i b.id_rsa root@proxy1 -p 222 -W %h:%p", }, { &SSHConfig{ Timeout: 60 * time.Second, Port: 1203, KeyFile: "id_rsa", Proxy: &SSHConfig{ User: "root", Host: "proxy1", Port: 222, KeyFile: "b.id_rsa", Timeout: 10 * time.Second, }, }, false, "-p 1203 -o ConnectTimeout=60 -i id_rsa -o ProxyCommand=ssh -o ConnectTimeout=10 -i b.id_rsa root@proxy1 -p 222 -W %h:%p", }, { &SSHConfig{ Timeout: 60 * time.Second, Password: "pass", Proxy: &SSHConfig{ User: "root", Host: "proxy1", Port: 222, Password: "word", Timeout: 10 * time.Second, }, }, false, "sshpass -p pass -P password -o ConnectTimeout=60 -o ProxyCommand=sshpass -p word -P password ssh -o ConnectTimeout=10 root@proxy1 -p 222 -W %h:%p", }, } e := &NativeSSHExecutor{} for _, tc := range testcases { e.Config = tc.c assert.Equal(t, tc.e, strings.Join(e.configArgs([]string{}, 
tc.s), " ")) } } tiup-1.16.3/pkg/cluster/manager/000077500000000000000000000000001505422223000164325ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/manager/basic.go000066400000000000000000000247341505422223000200540ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "errors" "fmt" "os" "strings" "time" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // EnableCluster enable/disable the service in a cluster func (m *Manager) EnableCluster(name string, gOpt operator.Options, isEnable bool) error { if isEnable { m.logger.Infof("Enabling cluster %s...", name) } else { m.logger.Infof("Disabling cluster %s...", name) } metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } if isEnable { b = b.Func("EnableCluster", func(ctx context.Context) error { return operator.Enable(ctx, topo, gOpt, isEnable) }) } else { b = 
b.Func("DisableCluster", func(ctx context.Context) error { return operator.Enable(ctx, topo, gOpt, isEnable) }) } t := b.Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } if isEnable { m.logger.Infof("Enabled cluster `%s` successfully", name) } else { m.logger.Infof("Disabled cluster `%s` successfully", name) } return nil } // StartCluster start the cluster with specified name. func (m *Manager) StartCluster(name string, gOpt operator.Options, restoreLeader bool, fn ...func(b *task.Builder, metadata spec.Metadata)) error { m.logger.Infof("Starting cluster %s...", name) // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { return err } metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } b.Func("StartCluster", func(ctx context.Context) error { return operator.Start(ctx, topo, gOpt, restoreLeader, tlsCfg) }) for _, f := range fn { f(b, metadata) } t := b.Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } m.logger.Infof("Started cluster `%s` successfully", name) return nil } // StopCluster stop the cluster. 
func (m *Manager) StopCluster( name string, gOpt operator.Options, skipConfirm, evictLeader bool, ) error { // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { return err } metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("Will stop the cluster %s with nodes: %s, roles: %s.\nDo you want to continue? [y/N]:", color.HiYellowString(name), color.HiRedString(strings.Join(gOpt.Nodes, ",")), color.HiRedString(strings.Join(gOpt.Roles, ",")), ), ); err != nil { return err } } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t := b. Func("StopCluster", func(ctx context.Context) error { return operator.Stop(ctx, topo, gOpt, evictLeader, tlsCfg) }). Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } m.logger.Infof("Stopped cluster `%s` successfully", name) return nil } // RestartCluster restart the cluster. 
func (m *Manager) RestartCluster(name string, gOpt operator.Options, skipConfirm bool) error { // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { return err } metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } if !skipConfirm { var availabilityMessage string var rolesToRestart string var nodesToRestart string if len(gOpt.Nodes) == 0 && len(gOpt.Roles) == 0 { availabilityMessage = "Cluster will be unavailable" rolesToRestart = "all" nodesToRestart = "all" } else { availabilityMessage = fmt.Sprintf("Cluster functionality related to nodes: %s roles: %s will be unavailable", strings.Join(gOpt.Nodes, ","), strings.Join(gOpt.Roles, ",")) nodesToRestart = strings.Join(gOpt.Nodes, ",") rolesToRestart = strings.Join(gOpt.Roles, ",") } confirmationMessage := fmt.Sprintf("Will restart the cluster %s with nodes: %s roles: %s.\n%s\nDo you want to continue? [y/N]:", color.HiYellowString(name), color.HiYellowString(nodesToRestart), color.HiYellowString(rolesToRestart), availabilityMessage, ) if err := tui.PromptForConfirmOrAbortError("%s", confirmationMessage); err != nil { return err } } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t := b. Func("RestartCluster", func(ctx context.Context) error { return operator.Restart(ctx, topo, gOpt, tlsCfg) }). Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return perrs.Trace(err) } m.logger.Infof("Restarted cluster `%s` successfully", name) return nil } // getMonitorHosts get the instance to ignore list if it marks itself as ignore_exporter func getMonitorHosts(topo spec.Topology) (map[string]hostInfo, set.StringSet) { // monitor uniqueHosts := make(map[string]hostInfo) // host -> ssh-port, os, arch noAgentHosts := set.NewStringSet() topo.IterInstance(func(inst spec.Instance) { // add the instance to ignore list if it marks itself as ignore_exporter if inst.IgnoreMonitorAgent() { noAgentHosts.Insert(inst.GetManageHost()) } if _, found := uniqueHosts[inst.GetManageHost()]; !found { uniqueHosts[inst.GetManageHost()] = hostInfo{ ssh: inst.GetSSHPort(), os: inst.OS(), arch: inst.Arch(), } } }) return uniqueHosts, noAgentHosts } // checkTiFlashWithTLS check tiflash vserson func checkTiFlashWithTLS(topo spec.Topology, version string) error { if clusterSpec, ok := topo.(*spec.Specification); ok { if clusterSpec.GlobalOptions.TLSEnabled { if (!tidbver.TiFlashSupportTLS(version) && len(clusterSpec.TiFlashServers) > 0) && version != utils.NightlyVersionAlias { return fmt.Errorf("TiFlash %s is not supported in TLS enabled cluster", version) } } } return nil } // BackupClusterMeta backup cluster meta to given filepath func (m *Manager) BackupClusterMeta(clusterName, filePath string) error { exist, err := m.specManager.Exist(clusterName) if err != nil { return err } if !exist { return fmt.Errorf("cluster %s does not exist", clusterName) } // check locked if err := m.specManager.ScaleOutLockedErr(clusterName); err != nil { return err } f, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755) if err != nil { return err } return utils.Tar(f, m.specManager.Path(clusterName)) } // RestoreClusterMeta restore cluster meta by given filepath func (m *Manager) RestoreClusterMeta(clusterName, filePath string, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(clusterName); err != nil { 
return err } fi, err := os.Stat(m.specManager.Path(clusterName)) if err != nil { if !os.IsNotExist(err) { return perrs.AddStack(err) } m.logger.Infof("%s", fmt.Sprintf("meta of cluster %s didn't exist before restore", clusterName)) skipConfirm = true } else { m.logger.Warnf("%s", color.HiRedString(tui.ASCIIArtWarning)) exist, err := m.specManager.Exist(clusterName) if err != nil { return err } if exist { m.logger.Infof("%s", fmt.Sprintf("the exist meta.yaml of cluster %s was last modified at %s", clusterName, color.HiYellowString(fi.ModTime().Format(time.RFC3339)))) } else { m.logger.Infof("%s", fmt.Sprintf("the meta.yaml of cluster %s does not exist", clusterName)) } } fi, err = os.Stat(filePath) if err != nil { return err } m.logger.Warnf("the given tarball was last modified at %s", color.HiYellowString(fi.ModTime().Format(time.RFC3339))) if !skipConfirm { if err := tui.PromptForAnswerOrAbortError( "Yes, I know my cluster meta will be be overridden.", "%s", fmt.Sprintf("This operation will override topology file and other meta file of %s cluster %s .", m.sysName, color.HiYellowString(clusterName), )+"\nAre you sure to continue?", ); err != nil { return err } m.logger.Infof("Restoring cluster meta files...") } err = os.RemoveAll(m.specManager.Path(clusterName)) if err != nil { return err } f, err := os.Open(filePath) if err != nil { return err } err = utils.Untar(f, m.specManager.Path(clusterName)) if err == nil { m.logger.Infof("%s", fmt.Sprintf("restore meta of cluster %s successfully.", clusterName)) } return err } tiup-1.16.3/pkg/cluster/manager/builder.go000066400000000000000000000742621505422223000204220ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "encoding/pem" "fmt" "os" "path/filepath" "strings" "github.com/fatih/color" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/crypto" "github.com/pingcap/tiup/pkg/environment" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // buildReloadPromTasks reloads Prometheus and Grafana configuration func buildReloadPromAndGrafanaTasks( topo spec.Topology, logger *logprinter.Logger, gOpt operator.Options, nodes ...string, ) []*task.StepDisplay { var instances []spec.Instance // get promtheus and grafana instance list monitor := spec.FindComponent(topo, spec.ComponentPrometheus) grafanas := spec.FindComponent(topo, spec.ComponentGrafana) instances = append(instances, monitor.Instances()...) instances = append(instances, grafanas.Instances()...) if len(instances) == 0 { return nil } var tasks []*task.StepDisplay deletedNodes := set.NewStringSet(nodes...) 
systemdMode := topo.BaseTopo().GlobalOptions.SystemdMode for _, inst := range instances { if deletedNodes.Exist(inst.ID()) { continue } t := task.NewBuilder(logger) if inst.ComponentName() == spec.ComponentPrometheus { // reload Prometheus t = t.SystemCtl(inst.GetManageHost(), inst.ServiceName(), "reload", true, true, string(systemdMode)) } else { // restart grafana t = t.SystemCtl(inst.GetManageHost(), inst.ServiceName(), "restart", true, false, string(systemdMode)) } tasks = append(tasks, t.BuildAsStep(fmt.Sprintf(" - Reload %s -> %s", inst.ComponentName(), inst.ID()))) } return tasks } func buildScaleOutTask( m *Manager, name string, metadata spec.Metadata, mergedTopo spec.Topology, opt DeployOptions, s, p *tui.SSHConnectionProps, newPart spec.Topology, patchedComponents set.StringSet, gOpt operator.Options, afterDeploy func(b *task.Builder, newPart spec.Topology, gOpt operator.Options), final func(b *task.Builder, name string, meta spec.Metadata, gOpt operator.Options), ) (task.Task, error) { var ( envInitTasks []*task.StepDisplay // tasks which are used to initialize environment downloadCompTasks []*task.StepDisplay // tasks which are used to download components deployCompTasks []*task.StepDisplay // tasks which are used to copy components to remote host ) topo := metadata.GetTopology() base := metadata.GetBaseMeta() specManager := m.specManager tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return nil, err } var sudo bool systemdMode := topo.BaseTopo().GlobalOptions.SystemdMode if systemdMode == spec.UserMode { sudo = false } else { sudo = true } // Initialize the environments initializedHosts := set.NewStringSet() metadata.GetTopology().IterInstance(func(instance spec.Instance) { initializedHosts.Insert(instance.GetManageHost()) }) // uninitializedHosts are hosts which haven't been initialized yet uninitializedHosts := make(map[string]hostInfo) // host -> ssh-port, os, arch newPart.IterInstance(func(instance 
spec.Instance) { host := instance.GetManageHost() if initializedHosts.Exist(host) { return } if _, found := uninitializedHosts[host]; found { return } uninitializedHosts[host] = hostInfo{ ssh: instance.GetSSHPort(), os: instance.OS(), arch: instance.Arch(), } var dirs []string globalOptions := metadata.GetTopology().BaseTopo().GlobalOptions for _, dir := range []string{globalOptions.DeployDir, globalOptions.DataDir, globalOptions.LogDir} { for dirname := range strings.SplitSeq(dir, ",") { if dirname == "" { continue } dirs = append(dirs, spec.Abs(globalOptions.User, dirname)) } } if systemdMode == spec.UserMode { dirs = append(dirs, spec.Abs(globalOptions.User, ".config/systemd/user")) } t := task.NewBuilder(m.logger). RootSSH( instance.GetManageHost(), instance.GetSSHPort(), opt.User, s.Password, s.IdentityFile, s.IdentityFilePassphrase, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, p.Password, p.IdentityFile, p.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, globalOptions.SSHType, opt.User != "root" && systemdMode != spec.UserMode, ). EnvInit(instance.GetManageHost(), base.User, base.Group, opt.SkipCreateUser || globalOptions.User == opt.User, sudo). Mkdir(globalOptions.User, instance.GetManageHost(), sudo, dirs...). 
BuildAsStep(fmt.Sprintf(" - Initialized host %s ", host)) envInitTasks = append(envInitTasks, t) }) // Download missing component downloadCompTasks = buildDownloadCompTasks( base.Version, newPart, m.logger, gOpt, ) sshType := topo.BaseTopo().GlobalOptions.SSHType var iterErr error // Deploy the new topology and refresh the configuration newPart.IterInstance(func(inst spec.Instance) { version := inst.CalculateVersion(base.Version) deployDir := spec.Abs(base.User, inst.DeployDir()) // data dir would be empty for components which don't need it dataDirs := spec.MultiDirAbs(base.User, inst.DataDir()) // log dir will always be with values, but might not used by the component logDir := spec.Abs(base.User, inst.LogDir()) deployDirs := []string{ deployDir, filepath.Join(deployDir, "bin"), filepath.Join(deployDir, "conf"), filepath.Join(deployDir, "scripts"), } // Deploy component tb := task.NewSimpleUerSSH(m.logger, inst.GetManageHost(), inst.GetSSHPort(), base.User, gOpt, p, sshType). Mkdir(base.User, inst.GetManageHost(), sudo, deployDirs...). Mkdir(base.User, inst.GetManageHost(), sudo, dataDirs...). 
Mkdir(base.User, inst.GetManageHost(), sudo, logDir) srcPath := "" if patchedComponents.Exist(inst.ComponentName()) { srcPath = specManager.Path(name, spec.PatchDirName, inst.ComponentName()+".tar.gz") } if deployerInstance, ok := inst.(DeployerInstance); ok { deployerInstance.Deploy(tb, srcPath, deployDir, version, name, version) } else { // copy dependency component if needed switch inst.ComponentName() { case spec.ComponentTiSpark: env := environment.GlobalEnv() var sparkVer utils.Version if sparkVer, _, iterErr = env.V1Repository().LatestStableVersion(spec.ComponentSpark, false); iterErr != nil { return } tb = tb.DeploySpark(inst, sparkVer.String(), srcPath, deployDir) default: tb.CopyComponent( inst.ComponentSource(), inst.OS(), inst.Arch(), inst.CalculateVersion(version), srcPath, inst.GetManageHost(), deployDir, ) } } deployCompTasks = append(deployCompTasks, tb.BuildAsStep(fmt.Sprintf(" - Deploy instance %s -> %s", inst.ComponentName(), inst.ID()))) }) if iterErr != nil { return nil, iterErr } // Download and copy the latest component to remote if the cluster is imported from Ansible mergedTopo.IterInstance(func(inst spec.Instance) { if inst.IsImported() { deployDir := spec.Abs(base.User, inst.DeployDir()) // data dir would be empty for components which don't need it // Download and copy the latest component to remote if the cluster is imported from Ansible tb := task.NewBuilder(m.logger) version := inst.CalculateVersion(base.Version) switch compName := inst.ComponentName(); compName { case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: tb.Download(compName, inst.OS(), inst.Arch(), version). 
CopyComponent(compName, inst.OS(), inst.Arch(), version, "", inst.GetManageHost(), deployDir) } deployCompTasks = append(deployCompTasks, tb.BuildAsStep(fmt.Sprintf(" - Deploy instance %s -> %s", inst.ComponentName(), inst.ID()))) } }) // init scale out config scaleOutConfigTasks := buildScaleConfigTasks(m, name, topo, newPart, base, gOpt, p) certificateTasks, err := buildCertificateTasks(m, name, newPart, base, gOpt, p) if err != nil { return nil, err } sessionCertTasks, err := buildSessionCertTasks(m, name, topo, newPart, base, gOpt, p) if err != nil { return nil, err } certificateTasks = append(certificateTasks, sessionCertTasks...) // always ignore config check result in scale out gOpt.IgnoreConfigCheck = true refreshConfigTasks, hasImported := buildInitConfigTasks(m, name, mergedTopo, base, gOpt, nil) // handle dir scheme changes if hasImported { if err := spec.HandleImportPathMigration(name); err != nil { return task.NewBuilder(m.logger).Build(), err } } _, noAgentHosts := getMonitorHosts(mergedTopo) // Deploy monitor relevant components to remote dlTasks, dpTasks, err := buildMonitoredDeployTask( m, uninitializedHosts, noAgentHosts, topo.BaseTopo().GlobalOptions, topo.BaseTopo().MonitoredOptions, gOpt, p, ) if err != nil { return nil, err } downloadCompTasks = append(downloadCompTasks, dlTasks...) deployCompTasks = append(deployCompTasks, dpTasks...) // monitor config monitorConfigTasks := buildInitMonitoredConfigTasks( m.specManager, name, uninitializedHosts, noAgentHosts, *topo.BaseTopo().GlobalOptions, topo.GetMonitoredOptions(), m.logger, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt, p, ) // monitor tls file moniterCertificateTasks, err := buildMonitoredCertificateTasks( m, name, uninitializedHosts, noAgentHosts, topo.BaseTopo().GlobalOptions, topo.GetMonitoredOptions(), gOpt, p, ) if err != nil { return nil, err } certificateTasks = append(certificateTasks, moniterCertificateTasks...) 
builder, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return nil, err } // stage2 just start and init config if !opt.Stage2 { builder. ParallelStep("+ Download TiDB components", gOpt.Force, downloadCompTasks...). ParallelStep("+ Initialize target host environments", gOpt.Force, envInitTasks...). ParallelStep("+ Deploy TiDB instance", gOpt.Force, deployCompTasks...). ParallelStep("+ Copy certificate to remote host", gOpt.Force, certificateTasks...). ParallelStep("+ Generate scale-out config", gOpt.Force, scaleOutConfigTasks...). ParallelStep("+ Init monitor config", gOpt.Force, monitorConfigTasks...) } if afterDeploy != nil { afterDeploy(builder, newPart, gOpt) } builder.Func("Save meta", func(_ context.Context) error { metadata.SetTopology(mergedTopo) return m.specManager.SaveMeta(name, metadata) }) // don't start the new instance if opt.Stage1 { // save scale out file lock builder.Func("Create scale-out file lock", func(_ context.Context) error { return m.specManager.NewScaleOutLock(name, newPart) }) } else { builder.Func("Start new instances", func(ctx context.Context) error { return operator.Start(ctx, newPart, operator.Options{ OptTimeout: gOpt.OptTimeout, Operation: operator.ScaleOutOperation, }, false, /* restoreLeader */ tlsCfg, ) }). ParallelStep("+ Refresh components conifgs", gOpt.Force, refreshConfigTasks...). ParallelStep("+ Reload prometheus and grafana", gOpt.Force, buildReloadPromAndGrafanaTasks(metadata.GetTopology(), m.logger, gOpt)...) 
} // remove scale-out file lock if opt.Stage2 { builder.Func("Release Scale-Out File Lock", func(ctx context.Context) error { return m.specManager.ReleaseScaleOutLock(name) }) } if final != nil { final(builder, name, metadata, gOpt) } return builder.Build(), nil } // buildScaleConfigTasks generates certificate for instance and transfers it to the server func buildScaleConfigTasks( m *Manager, name string, topo spec.Topology, newPart spec.Topology, base *spec.BaseMeta, gOpt operator.Options, p *tui.SSHConnectionProps) []*task.StepDisplay { var ( scaleConfigTasks []*task.StepDisplay // tasks which are used to copy certificate to remote host ) // copy certificate to remote host newPart.IterInstance(func(inst spec.Instance) { deployDir := spec.Abs(base.User, inst.DeployDir()) // data dir would be empty for components which don't need it dataDirs := spec.MultiDirAbs(base.User, inst.DataDir()) // log dir will always be with values, but might not used by the component logDir := spec.Abs(base.User, inst.LogDir()) t := task.NewSimpleUerSSH(m.logger, inst.GetManageHost(), inst.GetSSHPort(), base.User, gOpt, p, topo.BaseTopo().GlobalOptions.SSHType). 
ScaleConfig( name, base.Version, m.specManager, topo, inst, base.User, meta.DirPaths{ Deploy: deployDir, Data: dataDirs, Log: logDir, }, ).BuildAsStep(fmt.Sprintf(" - Generate scale-out config %s -> %s", inst.ComponentName(), inst.ID())) scaleConfigTasks = append(scaleConfigTasks, t) }) return scaleConfigTasks } type hostInfo struct { ssh int // ssh port of host os string // operating system arch string // cpu architecture // vendor string } func buildMonitoredDeployTask( m *Manager, uniqueHosts map[string]hostInfo, // host -> ssh-port, os, arch noAgentHosts set.StringSet, // hosts that do not deploy monitor agents globalOptions *spec.GlobalOptions, monitoredOptions *spec.MonitoredOptions, gOpt operator.Options, p *tui.SSHConnectionProps, ) (downloadCompTasks []*task.StepDisplay, deployCompTasks []*task.StepDisplay, err error) { if monitoredOptions == nil { return } uniqueCompOSArch := set.NewStringSet() // monitoring agents for _, comp := range []string{spec.ComponentNodeExporter, spec.ComponentBlackboxExporter} { version := monitoredOptions.NodeExporterVersion if comp == spec.ComponentBlackboxExporter { version = monitoredOptions.BlackboxExporterVersion } for host, info := range uniqueHosts { // skip deploying monitoring agents if the instance is marked so if noAgentHosts.Exist(host) { continue } // populate unique comp-os-arch set key := fmt.Sprintf("%s-%s-%s", comp, info.os, info.arch) if found := uniqueCompOSArch.Exist(key); !found { uniqueCompOSArch.Insert(key) downloadCompTasks = append(downloadCompTasks, task.NewBuilder(m.logger). Download(comp, info.os, info.arch, version). 
BuildAsStep(fmt.Sprintf(" - Download %s:%s (%s/%s)", comp, version, info.os, info.arch))) } deployDir := spec.Abs(globalOptions.User, monitoredOptions.DeployDir) // data dir would be empty for components which don't need it dataDir := monitoredOptions.DataDir // the default data_dir is relative to deploy_dir if dataDir != "" && !strings.HasPrefix(dataDir, "/") { dataDir = filepath.Join(deployDir, dataDir) } // log dir will always be with values, but might not used by the component logDir := spec.Abs(globalOptions.User, monitoredOptions.LogDir) deployDirs := []string{ deployDir, dataDir, logDir, filepath.Join(deployDir, "bin"), filepath.Join(deployDir, "conf"), filepath.Join(deployDir, "scripts"), } // Deploy component tb := task.NewSimpleUerSSH(m.logger, host, info.ssh, globalOptions.User, gOpt, p, globalOptions.SSHType). Mkdir(globalOptions.User, host, globalOptions.SystemdMode != spec.UserMode, deployDirs...). CopyComponent( comp, info.os, info.arch, version, "", host, deployDir, ) deployCompTasks = append(deployCompTasks, tb.BuildAsStep(fmt.Sprintf(" - Deploy %s -> %s", comp, host))) } } return } // buildMonitoredCertificateTasks generates certificate for instance and transfers it to the server func buildMonitoredCertificateTasks( m *Manager, name string, uniqueHosts map[string]hostInfo, // host -> ssh-port, os, arch noAgentHosts set.StringSet, // hosts that do not deploy monitor agents globalOptions *spec.GlobalOptions, monitoredOptions *spec.MonitoredOptions, gOpt operator.Options, p *tui.SSHConnectionProps, ) ([]*task.StepDisplay, error) { var certificateTasks []*task.StepDisplay if monitoredOptions == nil { return certificateTasks, nil } if globalOptions.TLSEnabled { // monitoring agents for _, comp := range []string{spec.ComponentNodeExporter, spec.ComponentBlackboxExporter} { for host, info := range uniqueHosts { // skip deploying monitoring agents if the instance is marked so if noAgentHosts.Exist(host) { continue } deployDir := 
spec.Abs(globalOptions.User, monitoredOptions.DeployDir) tlsDir := filepath.Join(deployDir, spec.TLSCertKeyDir) // Deploy component tb := task.NewSimpleUerSSH(m.logger, host, info.ssh, globalOptions.User, gOpt, p, globalOptions.SSHType). Mkdir(globalOptions.User, host, globalOptions.SystemdMode != spec.UserMode, tlsDir) if comp == spec.ComponentBlackboxExporter { ca, innerr := crypto.ReadCA( name, m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCACert), m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCAKey), ) if innerr != nil { return certificateTasks, innerr } tb = tb.TLSCert( host, spec.ComponentBlackboxExporter, spec.ComponentBlackboxExporter, monitoredOptions.BlackboxExporterPort, ca, meta.DirPaths{ Deploy: deployDir, Cache: m.specManager.Path(name, spec.TempConfigPath), }) } certificateTasks = append(certificateTasks, tb.BuildAsStep(fmt.Sprintf(" - Generate certificate %s -> %s", comp, host))) } } } return certificateTasks, nil } func buildInitMonitoredConfigTasks( specManager *spec.SpecManager, name string, uniqueHosts map[string]hostInfo, // host -> ssh-port, os, arch noAgentHosts set.StringSet, globalOptions spec.GlobalOptions, monitoredOptions *spec.MonitoredOptions, logger *logprinter.Logger, sshTimeout, exeTimeout uint64, gOpt operator.Options, p *tui.SSHConnectionProps, ) []*task.StepDisplay { if monitoredOptions == nil { return nil } tasks := []*task.StepDisplay{} // monitoring agents for _, comp := range []string{spec.ComponentNodeExporter, spec.ComponentBlackboxExporter} { for host, info := range uniqueHosts { if noAgentHosts.Exist(host) { continue } deployDir := spec.Abs(globalOptions.User, monitoredOptions.DeployDir) // data dir would be empty for components which don't need it dataDir := monitoredOptions.DataDir // the default data_dir is relative to deploy_dir if dataDir != "" && !strings.HasPrefix(dataDir, "/") { dataDir = filepath.Join(deployDir, dataDir) } // log dir will always be with values, but might not used by the component 
logDir := spec.Abs(globalOptions.User, monitoredOptions.LogDir) // Generate configs t := task.NewSimpleUerSSH(logger, host, info.ssh, globalOptions.User, gOpt, p, globalOptions.SSHType). MonitoredConfig( name, comp, host, globalOptions.ResourceControl, monitoredOptions, globalOptions.User, globalOptions.TLSEnabled, meta.DirPaths{ Deploy: deployDir, Data: []string{dataDir}, Log: logDir, Cache: specManager.Path(name, spec.TempConfigPath), }, globalOptions.SystemdMode, ). BuildAsStep(fmt.Sprintf(" - Generate config %s -> %s", comp, host)) tasks = append(tasks, t) } } return tasks } func buildInitConfigTasks( m *Manager, name string, topo spec.Topology, base *spec.BaseMeta, gOpt operator.Options, nodes []string, ) ([]*task.StepDisplay, bool) { var tasks []*task.StepDisplay hasImported := false deletedNodes := set.NewStringSet(nodes...) topo.IterInstance(func(instance spec.Instance) { if deletedNodes.Exist(instance.ID()) { return } compName := instance.ComponentName() deployDir := spec.Abs(base.User, instance.DeployDir()) // data dir would be empty for components which don't need it dataDirs := spec.MultiDirAbs(base.User, instance.DataDir()) // log dir will always be with values, but might not used by the component logDir := spec.Abs(base.User, instance.LogDir()) // Download and copy the latest component to remote if the cluster is imported from Ansible tb := task.NewBuilder(m.logger) if instance.IsImported() { version := instance.CalculateVersion(base.Version) switch compName { case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: tb.Download(compName, instance.OS(), instance.Arch(), version). CopyComponent( compName, instance.OS(), instance.Arch(), version, "", // use default srcPath instance.GetManageHost(), deployDir, ) } hasImported = true } t := tb. 
InitConfig( name, base.Version, m.specManager, instance, base.User, gOpt.IgnoreConfigCheck, meta.DirPaths{ Deploy: deployDir, Data: dataDirs, Log: logDir, Cache: m.specManager.Path(name, spec.TempConfigPath), }, ). BuildAsStep(fmt.Sprintf(" - Generate config %s -> %s", compName, instance.ID())) tasks = append(tasks, t) }) return tasks, hasImported } // buildDownloadCompTasks build download component tasks func buildDownloadCompTasks( clusterVersion string, topo spec.Topology, logger *logprinter.Logger, gOpt operator.Options, ) []*task.StepDisplay { var tasks []*task.StepDisplay uniqueTaskList := set.NewStringSet() topo.IterInstance(func(inst spec.Instance) { key := fmt.Sprintf("%s-%s-%s", inst.ComponentSource(), inst.OS(), inst.Arch()) if found := uniqueTaskList.Exist(key); !found { uniqueTaskList.Insert(key) // we don't set version for tispark, so the latest tispark will be used var version string if inst.ComponentName() == spec.ComponentTiSpark { // download spark as dependency of tispark tasks = append(tasks, buildDownloadSparkTask(inst, logger, gOpt)) } else { version = inst.CalculateVersion(clusterVersion) } t := task.NewBuilder(logger). Download(inst.ComponentSource(), inst.OS(), inst.Arch(), version). BuildAsStep(fmt.Sprintf(" - Download %s:%s (%s/%s)", inst.ComponentSource(), version, inst.OS(), inst.Arch())) tasks = append(tasks, t) } }) return tasks } // buildDownloadSparkTask build download task for spark, which is a dependency of tispark // FIXME: this is a hack and should be replaced by dependency handling in manifest processing func buildDownloadSparkTask(inst spec.Instance, logger *logprinter.Logger, gOpt operator.Options) *task.StepDisplay { return task.NewBuilder(logger). Download(spec.ComponentSpark, inst.OS(), inst.Arch(), ""). 
BuildAsStep(fmt.Sprintf(" - Download %s: (%s/%s)", spec.ComponentSpark, inst.OS(), inst.Arch())) } // buildTLSTask create enable/disable tls task func buildTLSTask( m *Manager, name string, metadata spec.Metadata, gOpt operator.Options, reloadCertificate bool, p *tui.SSHConnectionProps, delFileMap map[string]set.StringSet, ) (task.Task, error) { topo := metadata.GetTopology() base := metadata.GetBaseMeta() // load certificate file if topo.BaseTopo().GlobalOptions.TLSEnabled { tlsDir := m.specManager.Path(name, spec.TLSCertKeyDir) m.logger.Infof("Generate certificate: %s", color.YellowString(tlsDir)) if err := m.loadCertificate(name, topo.BaseTopo().GlobalOptions, reloadCertificate); err != nil { return nil, err } } certificateTasks, err := buildCertificateTasks(m, name, topo, base, gOpt, p) if err != nil { return nil, err } refreshConfigTasks, hasImported := buildInitConfigTasks(m, name, topo, base, gOpt, nil) // handle dir scheme changes if hasImported { if err := spec.HandleImportPathMigration(name); err != nil { return task.NewBuilder(m.logger).Build(), err } } // monitor uniqueHosts, noAgentHosts := getMonitorHosts(topo) moniterCertificateTasks, err := buildMonitoredCertificateTasks( m, name, uniqueHosts, noAgentHosts, topo.BaseTopo().GlobalOptions, topo.GetMonitoredOptions(), gOpt, p, ) if err != nil { return nil, err } monitorConfigTasks := buildInitMonitoredConfigTasks( m.specManager, name, uniqueHosts, noAgentHosts, *topo.BaseTopo().GlobalOptions, topo.GetMonitoredOptions(), m.logger, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt, p, ) builder, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return nil, err } builder. ParallelStep("+ Copy certificate to remote host", gOpt.Force, certificateTasks...). ParallelStep("+ Copy monitor certificate to remote host", gOpt.Force, moniterCertificateTasks...). ParallelStep("+ Refresh instance configs", gOpt.Force, refreshConfigTasks...). 
ParallelStep("+ Refresh monitor configs", gOpt.Force, monitorConfigTasks...). Func("Save meta", func(_ context.Context) error { return m.specManager.SaveMeta(name, metadata) }) // cleanup tls files only in tls disable if !topo.BaseTopo().GlobalOptions.TLSEnabled { builder.Func("Cleanup TLS files", func(ctx context.Context) error { return operator.CleanupComponent(ctx, delFileMap, topo.BaseTopo().GlobalOptions.SystemdMode != spec.UserMode) }) } tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return nil, err } builder. Func("Restart Cluster", func(ctx context.Context) error { return operator.Restart(ctx, topo, gOpt, tlsCfg) }). Func("Reload PD Members", func(ctx context.Context) error { return operator.SetPDMember(ctx, name, topo.BaseTopo().GlobalOptions.TLSEnabled, tlsCfg, metadata) }) return builder.Build(), nil } func genTiProxySessionCerts(dir string) error { if err := os.MkdirAll(dir, 0755); err != nil { return err } ca, err := crypto.NewCA("tiproxy") if err != nil { return err } privKey, err := crypto.NewKeyPair(crypto.KeyTypeRSA, crypto.KeySchemeRSASSAPSSSHA256) if err != nil { return err } csr, err := privKey.CSR("tiproxy", "tiproxy", nil, nil) if err != nil { return err } cert, err := ca.Sign(csr) if err != nil { return err } if err := utils.SaveFileWithBackup(filepath.Join(dir, "tiproxy-session.key"), privKey.Pem(), ""); err != nil { return err } return utils.SaveFileWithBackup(filepath.Join(dir, "tiproxy-session.crt"), pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: cert, }), "") } // buildSessionCertTasks puts a self-signed cert to all TiDB if there is tiproxy. // For deploy: originalTopo = nil, newTopo = topology. // For scale-out: originalTopo = original topology, newTopo = new topology. 
// buildSessionCertTasks builds the tasks that copy the TiProxy session
// certificate/key pair to every TiDB instance. It returns nil tasks (and no
// error) when neither the original nor the new topology contains a TiProxy
// instance.
func buildSessionCertTasks(m *Manager, name string, originalTopo spec.Topology, newTopo spec.Topology, base *spec.BaseMeta, gOpt operator.Options, p *tui.SSHConnectionProps) ([]*task.StepDisplay, error) {
	var certificateTasks []*task.StepDisplay // tasks which are used to copy certificate to remote host
	// detect whether TiProxy already existed before this operation
	hasOriginalTiProxy := false
	if originalTopo != nil {
		originalTopo.IterInstance(func(inst spec.Instance) {
			if inst.ComponentName() == spec.ComponentTiProxy {
				hasOriginalTiProxy = true
			}
		})
	}
	// detect whether TiProxy appears in the topology after this operation
	hasNewTiProxy := false
	newTopo.IterInstance(func(inst spec.Instance) {
		if inst.ComponentName() == spec.ComponentTiProxy {
			hasNewTiProxy = true
		}
	})
	// no TiProxy anywhere: session certs are not needed at all
	if !hasOriginalTiProxy && !hasNewTiProxy {
		return nil, nil
	}
	// local (cached) paths of the session key/cert generated under the
	// cluster's temp config directory
	tempPath := m.specManager.Path(name, spec.TempConfigPath)
	keyPath := filepath.Join(tempPath, "tiproxy-session.key")
	certPath := filepath.Join(tempPath, "tiproxy-session.crt")
	// copySessionCerts appends, for each TiDB instance, a task that creates
	// the remote tls dir and copies the session key/cert into it; non-TiDB
	// instances are skipped.
	copySessionCerts := func(inst spec.Instance) {
		if inst.ComponentName() != spec.ComponentTiDB {
			return
		}
		deployDir := spec.Abs(base.User, inst.DeployDir())
		tlsDir := filepath.Join(deployDir, spec.TLSCertKeyDir)
		tb := task.NewSimpleUerSSH(m.logger, inst.GetManageHost(), inst.GetSSHPort(), base.User, gOpt, p, newTopo.BaseTopo().GlobalOptions.SSHType).
			Mkdir(base.User, inst.GetManageHost(), newTopo.BaseTopo().GlobalOptions.SystemdMode != spec.UserMode, deployDir, tlsDir)
		tb = tb.
			CopyFile(keyPath, filepath.Join(deployDir, spec.TLSCertKeyDir, "tiproxy-session.key"), inst.GetHost(), false, 0, false).
			CopyFile(certPath, filepath.Join(deployDir, spec.TLSCertKeyDir, "tiproxy-session.crt"), inst.GetHost(), false, 0, false)
		t := tb.BuildAsStep(fmt.Sprintf(" - Copy session certificate %s -> %s", inst.ComponentName(), inst.ID()))
		certificateTasks = append(certificateTasks, t)
	}
	// If TiProxy is just enabled now (either deploy or scale-out), issue a session cert and copy the cert to original TiDB.
if !hasOriginalTiProxy { if err := genTiProxySessionCerts(tempPath); err != nil { return certificateTasks, err } if originalTopo != nil { originalTopo.IterInstance(copySessionCerts) } } // Copy the session cert to new TiDB. newTopo.IterInstance(copySessionCerts) return certificateTasks, nil } // buildCertificateTasks generates certificate for instance and transfers it to the server func buildCertificateTasks( m *Manager, name string, topo spec.Topology, base *spec.BaseMeta, gOpt operator.Options, p *tui.SSHConnectionProps) ([]*task.StepDisplay, error) { var ( iterErr error certificateTasks []*task.StepDisplay // tasks which are used to copy certificate to remote host ) // copy TLS certificate to remote host if topo.BaseTopo().GlobalOptions.TLSEnabled { topo.IterInstance(func(inst spec.Instance) { deployDir := spec.Abs(base.User, inst.DeployDir()) tlsDir := filepath.Join(deployDir, spec.TLSCertKeyDir) tb := task.NewSimpleUerSSH(m.logger, inst.GetManageHost(), inst.GetSSHPort(), base.User, gOpt, p, topo.BaseTopo().GlobalOptions.SSHType). Mkdir(base.User, inst.GetManageHost(), topo.BaseTopo().GlobalOptions.SystemdMode != spec.UserMode, deployDir, tlsDir) ca, err := crypto.ReadCA( name, m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCACert), m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCAKey), ) if err != nil { iterErr = err return } tb = tb.TLSCert( inst.GetHost(), inst.ComponentName(), inst.Role(), inst.GetMainPort(), ca, meta.DirPaths{ Deploy: deployDir, Cache: m.specManager.Path(name, spec.TempConfigPath), }) t := tb.BuildAsStep(fmt.Sprintf(" - Generate certificate %s -> %s", inst.ComponentName(), inst.ID())) certificateTasks = append(certificateTasks, t) }) } return certificateTasks, iterErr } tiup-1.16.3/pkg/cluster/manager/cacert.go000066400000000000000000000112331505422223000202220ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "crypto/x509" "encoding/pem" "fmt" "path/filepath" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/crypto" "github.com/pingcap/tiup/pkg/utils" ) func genAndSaveClusterCA(name, tlsPath string) (*crypto.CertificateAuthority, error) { ca, err := crypto.NewCA(name) if err != nil { return nil, err } // save CA private key if err := utils.SaveFileWithBackup(filepath.Join(tlsPath, spec.TLSCAKey), ca.Key.Pem(), ""); err != nil { return nil, perrs.Annotatef(err, "cannot save CA private key for %s", name) } // save CA certificate if err := utils.SaveFileWithBackup( filepath.Join(tlsPath, spec.TLSCACert), pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: ca.Cert.Raw, }), ""); err != nil { return nil, perrs.Annotatef(err, "cannot save CA certificate for %s", name) } return ca, nil } func genAndSaveClientCert(ca *crypto.CertificateAuthority, name, tlsPath string) error { privKey, err := crypto.NewKeyPair(crypto.KeyTypeRSA, crypto.KeySchemeRSASSAPSSSHA256) if err != nil { return err } // save client private key if err := utils.SaveFileWithBackup(filepath.Join(tlsPath, spec.TLSClientKey), privKey.Pem(), ""); err != nil { return perrs.Annotatef(err, "cannot save client private key for %s", name) } csr, err := privKey.CSR( "tiup-cluster-client", fmt.Sprintf("%s-client", name), []string{}, []string{}, ) if err != nil { return perrs.Annotatef(err, "cannot generate CSR of client certificate for %s", name) } cert, err := 
ca.Sign(csr) if err != nil { return perrs.Annotatef(err, "cannot sign client certificate for %s", name) } // save client certificate if err := utils.SaveFileWithBackup( filepath.Join(tlsPath, spec.TLSClientCert), pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: cert, }), ""); err != nil { return perrs.Annotatef(err, "cannot save client PEM certificate for %s", name) } // save pfx format certificate clientCert, err := x509.ParseCertificate(cert) if err != nil { return perrs.Annotatef(err, "cannot decode signed client certificate for %s", name) } pfxData, err := privKey.PKCS12(clientCert, ca) if err != nil { return perrs.Annotatef(err, "cannot encode client certificate to PKCS#12 format for %s", name) } if err := utils.SaveFileWithBackup( filepath.Join(tlsPath, spec.PFXClientCert), pfxData, ""); err != nil { return perrs.Annotatef(err, "cannot save client PKCS#12 certificate for %s", name) } return nil } // genAndSaveCertificate generate CA and client cert for TLS enabled cluster func (m *Manager) genAndSaveCertificate(clusterName string, globalOptions *spec.GlobalOptions) (*crypto.CertificateAuthority, error) { var ca *crypto.CertificateAuthority if globalOptions.TLSEnabled { // generate CA tlsPath := m.specManager.Path(clusterName, spec.TLSCertKeyDir) if err := utils.MkdirAll(tlsPath, 0755); err != nil { return nil, err } ca, err := genAndSaveClusterCA(clusterName, tlsPath) if err != nil { return nil, err } // generate client cert if err = genAndSaveClientCert(ca, clusterName, tlsPath); err != nil { return nil, err } } return ca, nil } // checkCertificate check if the certificate file exists // no need to determine whether to enable tls func (m *Manager) checkCertificate(clusterName string) error { tlsFiles := []string{ m.specManager.Path(clusterName, spec.TLSCertKeyDir, spec.TLSCACert), m.specManager.Path(clusterName, spec.TLSCertKeyDir, spec.TLSClientKey), m.specManager.Path(clusterName, spec.TLSCertKeyDir, spec.TLSClientCert), } // check if the file 
exists for _, file := range tlsFiles { if !utils.IsExist(file) { return perrs.Errorf("TLS file: %s does not exist", file) } } return nil } // loadCertificate // certificate file exists and reload is true // will reload certificate file func (m *Manager) loadCertificate(clusterName string, globalOptions *spec.GlobalOptions, reload bool) error { err := m.checkCertificate(clusterName) // no need to reload and the file already exists if !reload && err == nil { return nil } _, err = m.genAndSaveCertificate(clusterName, globalOptions) return err } tiup-1.16.3/pkg/cluster/manager/check.go000066400000000000000000000523041505422223000200420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "context" "encoding/json" "fmt" "path/filepath" "strings" "time" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // CheckOptions contains the options for check command type CheckOptions struct { User string // username to login to the SSH server IdentityFile string // path to the private key file UsePassword bool // use password instead of identity file for ssh connection Opr *operator.CheckOptions ApplyFix bool // try to apply fixes of failed checks ExistCluster bool // check an exist cluster TempDir string // tempdir } // CheckCluster check cluster before deploying or upgrading func (m *Manager) CheckCluster(clusterOrTopoName, scaleoutTopo string, opt CheckOptions, gOpt operator.Options) error { var topo spec.Specification ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) var currTopo *spec.Specification if opt.ExistCluster { // check for existing cluster clusterName := clusterOrTopoName exist, err := m.specManager.Exist(clusterName) if err != nil { return err } if !exist { return perrs.Errorf("cluster %s does not exist", clusterName) } metadata, err := spec.ClusterMetadata(clusterName) if err != nil { return err } if scaleoutTopo != "" { currTopo = metadata.Topology // complete global configuration topo.GlobalOptions = currTopo.GlobalOptions topo.MonitoredOptions = currTopo.MonitoredOptions topo.ServerConfigs = currTopo.ServerConfigs if err := spec.ParseTopologyYaml(scaleoutTopo, &topo, true); err != nil { return err } spec.ExpandRelativeDir(&topo) // 
checkConflict after fillHostArch // scaleOutTopo also is not exists instacne opt.ExistCluster = false } else { opt.IdentityFile = m.specManager.Path(clusterName, "ssh", "id_rsa") topo = *metadata.Topology opt.User = metadata.User } topo.AdjustByVersion(metadata.Version) } else { // check before cluster is deployed topoFileName := clusterOrTopoName if err := spec.ParseTopologyYaml(topoFileName, &topo); err != nil { return err } spec.ExpandRelativeDir(&topo) if err := checkConflict(m, "nonexist-dummy-tidb-cluster", &topo); err != nil { return err } } var ( sshConnProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} sshProxyProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} ) if gOpt.SSHType != executor.SSHTypeNone { var err error if sshConnProps, err = tui.ReadIdentityFileOrPassword(opt.IdentityFile, opt.UsePassword); err != nil { return err } if len(gOpt.SSHProxyHost) != 0 { if sshProxyProps, err = tui.ReadIdentityFileOrPassword(gOpt.SSHProxyIdentity, gOpt.SSHProxyUsePassword); err != nil { return err } } } var sudo bool if topo.BaseTopo().GlobalOptions.SystemdMode == spec.UserMode { sudo = false } else { sudo = opt.User != "root" } if err := m.fillHost(sshConnProps, sshProxyProps, &topo, &gOpt, opt.User, sudo); err != nil { return err } // Abort scale out operation if the merged topology is invalid if currTopo != nil && scaleoutTopo != "" { mergedTopo := currTopo.MergeTopo(&topo) if err := mergedTopo.Validate(); err != nil { return err } if err := checkConflict(m, clusterOrTopoName, mergedTopo); err != nil { return err } if err := checkSystemInfo(ctx, sshConnProps, sshProxyProps, &topo, &gOpt, &opt, mergedTopo.(*spec.Specification)); err != nil { return err } } else { if err := checkSystemInfo(ctx, sshConnProps, sshProxyProps, &topo, &gOpt, &opt, &topo); err != nil { return err } } if !opt.ExistCluster { return nil } // following checks are all for existing cluster // check PD status return m.checkRegionsInfo(clusterOrTopoName, &topo, &gOpt) } // 
HostCheckResult represents the check result of each node type HostCheckResult struct { Node string `json:"node"` Name string `json:"name"` Status string `json:"status"` Message string `json:"message"` } // checkSystemInfo performs series of checks and tests of the deploy server func checkSystemInfo( ctx context.Context, s, p *tui.SSHConnectionProps, topo *spec.Specification, gOpt *operator.Options, opt *CheckOptions, fullTopo *spec.Specification, ) error { var ( collectTasks []*task.StepDisplay checkTimeZoneTasks []*task.StepDisplay checkSysTasks []*task.StepDisplay cleanTasks []*task.StepDisplay applyFixTasks []*task.StepDisplay downloadTasks []*task.StepDisplay ) task.CheckToolsPathDir = opt.TempDir logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) insightVer := "" uniqueHosts := map[string]int{} // host -> ssh-port uniqueArchList := make(map[string]struct{}) // map["os-arch"]{} insightNodes := []spec.Instance{} roleFilter := set.NewStringSet(gOpt.Roles...) nodeFilter := set.NewStringSet(gOpt.Nodes...) components := topo.ComponentsByStartOrder() components = operator.FilterComponent(components, roleFilter) systemdDir := "/etc/systemd/system/" systemdMode := topo.BaseTopo().GlobalOptions.SystemdMode if systemdMode == spec.UserMode { systemdDir = "~/.config/systemd/user/" } for _, comp := range components { instances := operator.FilterInstance(comp.Instances(), nodeFilter) if len(instances) < 1 { continue } for _, inst := range instances { archKey := fmt.Sprintf("%s-%s", inst.OS(), inst.Arch()) if _, found := uniqueArchList[archKey]; !found { uniqueArchList[archKey] = struct{}{} t0 := task.NewBuilder(logger). Download( spec.ComponentCheckCollector, inst.OS(), inst.Arch(), insightVer, ). 
BuildAsStep(fmt.Sprintf(" - Downloading check tools for %s/%s", inst.OS(), inst.Arch())) downloadTasks = append(downloadTasks, t0) } t1 := task.NewBuilder(logger) // checks that applies to each instance if opt.ExistCluster { t1 = t1.CheckSys( inst.GetManageHost(), inst.DeployDir(), task.CheckTypePermission, topo, opt.Opr, ) } else { t1 = t1. CheckSys( inst.GetManageHost(), inst.DeployDir(), task.ChecktypeIsExist, topo, opt.Opr, ). CheckSys( inst.GetManageHost(), inst.DataDir(), task.ChecktypeIsExist, topo, opt.Opr, ). CheckSys( inst.GetManageHost(), inst.LogDir(), task.ChecktypeIsExist, topo, opt.Opr, ). CheckSys( inst.GetManageHost(), fmt.Sprintf("%s%s-%d.service", systemdDir, inst.ComponentName(), inst.GetPort()), task.ChecktypeIsExist, topo, opt.Opr, ) } // if the data dir set in topology is relative, and the home dir of deploy user // and the user run the check command is on different partitions, the disk detection // may be using incorrect partition for validations. for _, dataDir := range spec.MultiDirAbs(opt.User, inst.DataDir()) { // build checking tasks t1 = t1. CheckSys( inst.GetManageHost(), dataDir, task.CheckTypeFIO, topo, opt.Opr, ) if opt.ExistCluster { t1 = t1.CheckSys( inst.GetManageHost(), dataDir, task.CheckTypePermission, topo, opt.Opr, ) } } checkSysTasks = append( checkSysTasks, t1.BuildAsStep(fmt.Sprintf(" - Checking node %s", inst.GetManageHost())), ) if _, found := uniqueHosts[inst.GetManageHost()]; !found { uniqueHosts[inst.GetManageHost()] = inst.GetSSHPort() insightNodes = append(insightNodes, inst) } } } existPD := (&spec.PDComponent{Topology: fullTopo}).Instances() if len(existPD) < 1 { return fmt.Errorf("cannot find PD in exist cluster") } if _, found := uniqueHosts[existPD[0].GetManageHost()]; !found { insightNodes = append(insightNodes, existPD[0]) } for _, inst := range insightNodes { // build system info collecting tasks t2 := task.NewBuilder(logger). 
RootSSH( inst.GetManageHost(), inst.GetSSHPort(), opt.User, s.Password, s.IdentityFile, s.IdentityFilePassphrase, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, p.Password, p.IdentityFile, p.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, topo.GlobalOptions.SSHType, opt.User != "root" && systemdMode != spec.UserMode, ). Mkdir(opt.User, inst.GetManageHost(), systemdMode != spec.UserMode, filepath.Join(task.CheckToolsPathDir, "bin")). CopyComponent( spec.ComponentCheckCollector, inst.OS(), inst.Arch(), insightVer, "", // use default srcPath inst.GetManageHost(), task.CheckToolsPathDir, ). Shell( inst.GetManageHost(), filepath.Join(task.CheckToolsPathDir, "bin", "insight"), "", false, ). BuildAsStep(" - Getting system info of " + utils.JoinHostPort(inst.GetManageHost(), inst.GetSSHPort())) collectTasks = append(collectTasks, t2) t3 := task.NewBuilder(logger). RootSSH( inst.GetManageHost(), inst.GetSSHPort(), opt.User, s.Password, s.IdentityFile, s.IdentityFilePassphrase, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, p.Password, p.IdentityFile, p.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, topo.GlobalOptions.SSHType, opt.User != "root" && systemdMode != spec.UserMode, ). Rmdir(inst.GetManageHost(), task.CheckToolsPathDir). BuildAsStep(" - Cleanup check files on " + utils.JoinHostPort(inst.GetManageHost(), inst.GetSSHPort())) cleanTasks = append(cleanTasks, t3) } for host := range uniqueHosts { t4 := task.NewBuilder(logger). // check for time zone CheckSys( host, "", task.CheckTypeTimeZone, fullTopo, opt.Opr, ) checkTimeZoneTasks = append( checkTimeZoneTasks, t4.BuildAsStep(fmt.Sprintf(" - Checking node %s", host)), ) t1 := task.NewBuilder(logger). // check for general system info CheckSys( host, "", task.CheckTypeSystemInfo, topo, opt.Opr, ). CheckSys( host, "", task.CheckTypePartitions, topo, opt.Opr, ). 
// check for system limits Shell( host, "cat /etc/security/limits.conf", "", false, ). CheckSys( host, "", task.CheckTypeSystemLimits, topo, opt.Opr, ). // check for kernel params Shell( host, "sysctl -a", "", systemdMode != spec.UserMode, ). CheckSys( host, "", task.CheckTypeSystemConfig, topo, opt.Opr, ). // check for needed system service CheckSys( host, "", task.CheckTypeService, topo, opt.Opr, ). // check for needed packages CheckSys( host, "", task.CheckTypePackage, topo, opt.Opr, ) if !opt.ExistCluster { t1 = t1. // check for listening port Shell( host, "ss -lnt", "", false, ). CheckSys( host, "", task.CheckTypePort, topo, opt.Opr, ) } checkSysTasks = append( checkSysTasks, t1.BuildAsStep(fmt.Sprintf(" - Checking node %s", host)), ) } t := task.NewBuilder(logger). ParallelStep("+ Download necessary tools", false, downloadTasks...). ParallelStep("+ Collect basic system information", false, collectTasks...). ParallelStep("+ Check time zone", false, checkTimeZoneTasks...). ParallelStep("+ Check system requirements", false, checkSysTasks...). ParallelStep("+ Cleanup check files", false, cleanTasks...). Build() if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } checkResultTable := [][]string{ // Header {"Node", "Check", "Result", "Message"}, } checkResults := make([]HostCheckResult, 0) for host := range uniqueHosts { tf := task.NewBuilder(logger). 
RootSSH( host, uniqueHosts[host], opt.User, s.Password, s.IdentityFile, s.IdentityFilePassphrase, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, p.Password, p.IdentityFile, p.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, topo.GlobalOptions.SSHType, opt.User != "root" && systemdMode != spec.UserMode, ) res, err := handleCheckResults(ctx, host, opt, tf, string(topo.BaseTopo().GlobalOptions.SystemdMode)) if err != nil { continue } checkResults = append(checkResults, res...) applyFixTasks = append(applyFixTasks, tf.BuildAsStep(fmt.Sprintf(" - Applying changes on %s", host))) } checkResults = deduplicateCheckResult(checkResults) if gOpt.DisplayMode == "json" { checkResultStruct := make([]HostCheckResult, 0) for _, r := range checkResults { checkResultStruct = append(checkResultStruct, HostCheckResult{ r.Node, r.Name, r.Status, r.Message, }) } data, err := json.Marshal(struct { Result []HostCheckResult `json:"result"` }{Result: checkResultStruct}) if err != nil { return err } fmt.Println(string(data)) } else { resLines := formatHostCheckResults(checkResults) checkResultTable = append(checkResultTable, resLines...) // print check results *before* trying to applying checks // FIXME: add fix result to output, and display the table after fixing tui.PrintTable(checkResultTable, true) } if opt.ApplyFix { tc := task.NewBuilder(logger). ParallelStep("+ Try to apply changes to fix failed checks", false, applyFixTasks...). Build() if err := tc.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return perrs.Trace(err) } } return nil } // handleCheckResults parses the result of checks func handleCheckResults(ctx context.Context, host string, opt *CheckOptions, t *task.Builder, systemdMode string) ([]HostCheckResult, error) { rr, _ := ctxt.GetInner(ctx).GetCheckResults(host) if len(rr) < 1 { return nil, fmt.Errorf("no check results found for %s", host) } results := []*operator.CheckResult{} for _, r := range rr { results = append(results, r.(*operator.CheckResult)) } items := make([]HostCheckResult, 0) // m.logger.Infof("Check results of %s: (only errors and important info are displayed)", color.HiCyanString(host)) for _, r := range results { var item HostCheckResult if r.Err != nil { if r.IsWarning() { item = HostCheckResult{Node: host, Name: r.Name, Status: "Warn", Message: r.Error()} } else { item = HostCheckResult{Node: host, Name: r.Name, Status: "Fail", Message: r.Error()} } if !opt.ApplyFix { items = append(items, item) continue } msg, err := fixFailedChecks(host, r, t, systemdMode) if err != nil { ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger). 
Debugf("%s: fail to apply fix to %s (%s)", host, r.Name, err) } if msg != "" { // show auto fixing info item.Message = msg } } else if r.Msg != "" { item = HostCheckResult{Node: host, Name: r.Name, Status: "Pass", Message: r.Msg} } // show errors and messages only, ignore empty lines // if len(line) > 0 { if len(item.Node) > 0 { items = append(items, item) } } return items, nil } func formatHostCheckResults(results []HostCheckResult) [][]string { lines := make([][]string, 0) for _, r := range results { var coloredStatus string switch r.Status { case "Warn": coloredStatus = color.YellowString(r.Status) case "Fail": coloredStatus = color.HiRedString(r.Status) default: coloredStatus = color.GreenString(r.Status) } line := []string{r.Node, r.Name, coloredStatus, r.Message} lines = append(lines, line) } return lines } // fixFailedChecks tries to automatically apply changes to fix failed checks func fixFailedChecks(host string, res *operator.CheckResult, t *task.Builder, systemdMode string) (string, error) { msg := "" sudo := systemdMode != string(spec.UserMode) switch res.Name { case operator.CheckNameSysService: if strings.Contains(res.Msg, "not found") { return "", nil } fields := strings.Fields(res.Msg) if len(fields) < 2 { return "", fmt.Errorf("can not perform action of service, %s", res.Msg) } t.SystemCtl(host, fields[1], fields[0], false, false, systemdMode) msg = fmt.Sprintf("will try to '%s'", color.HiBlueString(res.Msg)) case operator.CheckNameSysctl: fields := strings.Fields(res.Msg) if len(fields) < 3 { return "", fmt.Errorf("can not set kernel parameter, %s", res.Msg) } t.Sysctl(host, fields[0], fields[2], sudo) msg = fmt.Sprintf("will try to set '%s'", color.HiBlueString(res.Msg)) case operator.CheckNameLimits: fields := strings.Fields(res.Msg) if len(fields) < 4 { return "", fmt.Errorf("can not set limits, %s", res.Msg) } t.Limit(host, fields[0], fields[1], fields[2], fields[3], sudo) msg = fmt.Sprintf("will try to set '%s'", color.HiBlueString(res.Msg)) 
case operator.CheckNameSELinux: t.Shell(host, fmt.Sprintf( "sed -i 's/^[[:blank:]]*SELINUX=enforcing/SELINUX=disabled/g' %s && %s", "/etc/selinux/config", "setenforce 0", ), "", sudo) msg = fmt.Sprintf("will try to %s, reboot might be needed", color.HiBlueString("disable SELinux")) case operator.CheckNameTHP: t.Shell(host, fmt.Sprintf(`if [ -d %[1]s ]; then echo never > %[1]s/enabled; fi`, "/sys/kernel/mm/transparent_hugepage"), "", sudo) msg = fmt.Sprintf("will try to %s, please check again after reboot", color.HiBlueString("disable THP")) case operator.CheckNameSwap: // not applying swappiness setting here, it should be fixed // in the sysctl check // t.Sysctl(host, "vm.swappiness", "0") t.Shell(host, "swapoff -a || exit 0", // ignore failure "", sudo, ) msg = "will try to disable swap, please also check /etc/fstab manually" default: msg = fmt.Sprintf("%s, auto fixing not supported", res) } return msg, nil } // checkRegionsInfo checks peer status from PD func (m *Manager) checkRegionsInfo(clusterName string, topo *spec.Specification, gOpt *operator.Options) error { m.logger.Infof("Checking region status of the cluster %s...", clusterName) tlsConfig, err := topo.TLSConfig(m.specManager.Path(clusterName, spec.TLSCertKeyDir)) if err != nil { return err } pdClient := api.NewPDClient( context.WithValue(context.TODO(), logprinter.ContextKeyLogger, m.logger), topo.GetPDListWithManageHost(), time.Second*time.Duration(gOpt.APITimeout), tlsConfig, ) hasUnhealthy := false for _, state := range []string{ "miss-peer", "pending-peer", } { rInfo, err := pdClient.CheckRegion(state) if err != nil { return err } if rInfo.Count > 0 { m.logger.Warnf( "Regions are not fully healthy: %s", color.YellowString("%d %s", rInfo.Count, state), ) hasUnhealthy = true } } if hasUnhealthy { m.logger.Warnf("Please fix unhealthy regions before other operations.") } else { m.logger.Infof("All regions are healthy.") } return nil } // checkConflict checks cluster conflict func checkConflict(m 
*Manager, clusterName string, topo spec.Topology) error { clusterList, err := m.specManager.GetAllClusters() if err != nil { return err } // use a dummy cluster name, the real cluster name is set during deploy if err := spec.CheckClusterPortConflict(clusterList, clusterName, topo); err != nil { return err } err = spec.CheckClusterDirConflict(clusterList, clusterName, topo) return err } // deduplicateCheckResult deduplicate check results func deduplicateCheckResult(checkResults []HostCheckResult) (uniqueResults []HostCheckResult) { // node: {name|status: set(msg)} tmpResultMap := map[string]map[string]set.StringSet{} // deduplicate for _, result := range checkResults { if tmpResultMap[result.Node] == nil { tmpResultMap[result.Node] = make(map[string]set.StringSet) } // insert msg into set msgKey := fmt.Sprintf("%s|%s", result.Name, result.Status) if tmpResultMap[result.Node][msgKey] == nil { tmpResultMap[result.Node][msgKey] = set.NewStringSet() } tmpResultMap[result.Node][msgKey].Insert(result.Message) } for node, msgMap := range tmpResultMap { for checkInfo, msgSet := range msgMap { nameAndstatus := strings.Split(checkInfo, "|") for _, msg := range msgSet.Slice() { uniqueResults = append(uniqueResults, HostCheckResult{ Node: node, Name: nameAndstatus[0], Status: nameAndstatus[1], Message: msg, }) } } } return } tiup-1.16.3/pkg/cluster/manager/cleanup.go000066400000000000000000000231261505422223000204140ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "context" "fmt" "path" "path/filepath" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" ) // CleanCluster cleans the cluster without destroying it func (m *Manager) CleanCluster(name string, gOpt operator.Options, cleanOpt operator.Options, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { return err } metadata, err := m.meta(name) if err != nil { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } // calculate file paths to be deleted before the prompt delFileMap := getCleanupFiles(topo, cleanOpt.CleanupData, cleanOpt.CleanupLog, false, cleanOpt.CleanupAuditLog, cleanOpt.RetainDataRoles, cleanOpt.RetainDataNodes) if !skipConfirm { if err := cleanupConfirm(m.logger, name, m.sysName, base.Version, cleanOpt, delFileMap); err != nil { return err } } m.logger.Infof("Cleanup cluster...") sudo := true if topo.BaseTopo().GlobalOptions.SystemdMode == spec.UserMode { sudo = false } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t := b. Func("StopCluster", func(ctx context.Context) error { return operator.Stop( ctx, topo, operator.Options{}, false, /* eviceLeader */ tlsCfg, ) }). Func("CleanupCluster", func(ctx context.Context) error { return operator.CleanupComponent(ctx, delFileMap, sudo) }). 
Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } m.logger.Infof("Cleanup%s in cluster `%s` successfully", cleanTarget(cleanOpt), name) return nil } // checkConfirm func cleanupConfirm(logger *logprinter.Logger, clusterName, sysName, version string, cleanOpt operator.Options, delFileMap map[string]set.StringSet) error { logger.Warnf("The clean operation will %s %s %s cluster `%s`", color.HiYellowString("stop"), sysName, version, color.HiYellowString(clusterName)) if err := tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]:"); err != nil { return err } // build file list string delFileList := "" for host, fileList := range delFileMap { // target host has no files to delete if len(fileList) == 0 { continue } delFileList += fmt.Sprintf("\n%s:", color.CyanString(host)) for _, dfp := range fileList.Slice() { delFileList += fmt.Sprintf("\n %s", dfp) } } logger.Warnf("Clean the clutser %s's%s.\nNodes will be ignored: %s\nRoles will be ignored: %s\nFiles to be deleted are: %s", color.HiYellowString(clusterName), cleanTarget(cleanOpt), cleanOpt.RetainDataNodes, cleanOpt.RetainDataRoles, delFileList) return tui.PromptForConfirmOrAbortError("Do you want to continue? 
[y/N]:") } func cleanTarget(cleanOpt operator.Options) string { target := "" if cleanOpt.CleanupData { target += " data" } if cleanOpt.CleanupLog { target += (" log") } if cleanOpt.CleanupAuditLog { target += (" audit-log") } return color.HiYellowString(target) } // cleanupFiles record the file that needs to be cleaned up type cleanupFiles struct { cleanupData bool // whether to clean up the data cleanupLog bool // whether to clean up the log cleanupTLS bool // whether to clean up the tls files cleanupAuditLog bool // whether to clean up the tidb server audit log retainDataRoles []string // roles that don't clean up retainDataNodes []string // roles that don't clean up ansibleImport bool // cluster is ansible deploy delFileMap map[string]set.StringSet } // getCleanupFiles get the files that need to be deleted func getCleanupFiles(topo spec.Topology, cleanupData, cleanupLog, cleanupTLS, cleanupAuditLog bool, retainDataRoles, retainDataNodes []string) map[string]set.StringSet { c := &cleanupFiles{ cleanupData: cleanupData, cleanupLog: cleanupLog, cleanupTLS: cleanupTLS, cleanupAuditLog: cleanupAuditLog, retainDataRoles: retainDataRoles, retainDataNodes: retainDataNodes, delFileMap: make(map[string]set.StringSet), } // calculate file paths to be deleted before the prompt c.instanceCleanupFiles(topo) c.monitorCleanupFiles(topo) return c.delFileMap } // instanceCleanupFiles get the files that need to be deleted in the component func (c *cleanupFiles) instanceCleanupFiles(topo spec.Topology) { for _, com := range topo.ComponentsByStopOrder() { instances := com.Instances() retainDataRoles := set.NewStringSet(c.retainDataRoles...) retainDataNodes := set.NewStringSet(c.retainDataNodes...) 
for _, ins := range instances { // not cleaning files of monitor agents if the instance does not have one // may not work switch ins.ComponentName() { case spec.ComponentNodeExporter, spec.ComponentBlackboxExporter: if ins.IgnoreMonitorAgent() { continue } } // Some data of instances will be retained dataRetained := retainDataRoles.Exist(ins.ComponentName()) || retainDataNodes.Exist(ins.ID()) || retainDataNodes.Exist(ins.GetHost()) if dataRetained { continue } // prevent duplicate directories dataPaths := set.NewStringSet() logPaths := set.NewStringSet() tlsPath := set.NewStringSet() if c.cleanupData && len(ins.DataDir()) > 0 { for dataDir := range strings.SplitSeq(ins.DataDir(), ",") { dataPaths.Insert(path.Join(dataDir, "*")) } } if c.cleanupLog && len(ins.LogDir()) > 0 { for logDir := range strings.SplitSeq(ins.LogDir(), ",") { // need to judge the audit log of tidb server if ins.ComponentName() == spec.ComponentTiDB { logPaths.Insert(path.Join(logDir, "tidb?[!audit]*.log")) logPaths.Insert(path.Join(logDir, "tidb.log")) // maybe no need deleted } else { logPaths.Insert(path.Join(logDir, "*.log")) } } } if c.cleanupAuditLog && ins.ComponentName() == spec.ComponentTiDB { for logDir := range strings.SplitSeq(ins.LogDir(), ",") { logPaths.Insert(path.Join(logDir, "tidb-audit*.log")) } } // clean tls data if c.cleanupTLS && !topo.BaseTopo().GlobalOptions.TLSEnabled { deployDir := spec.Abs(topo.BaseTopo().GlobalOptions.User, ins.DeployDir()) tlsDir := filepath.Join(deployDir, spec.TLSCertKeyDir) tlsPath.Insert(tlsDir) // ansible deploy if ins.IsImported() { ansibleTLSDir := filepath.Join(deployDir, spec.TLSCertKeyDirWithAnsible) tlsPath.Insert(ansibleTLSDir) c.ansibleImport = true } } if c.delFileMap[ins.GetManageHost()] == nil { c.delFileMap[ins.GetManageHost()] = set.NewStringSet() } c.delFileMap[ins.GetManageHost()].Join(logPaths).Join(dataPaths).Join(tlsPath) } } } // monitorCleanupFiles get the files that need to be deleted in the mointor func (c *cleanupFiles) 
monitorCleanupFiles(topo spec.Topology) { monitoredOptions := topo.BaseTopo().MonitoredOptions if monitoredOptions == nil { return } user := topo.BaseTopo().GlobalOptions.User // get the host with monitor installed uniqueHosts, noAgentHosts := getMonitorHosts(topo) retainDataNodes := set.NewStringSet(c.retainDataNodes...) // monitoring agents for host := range uniqueHosts { // determine if host don't need to delete dataRetained := noAgentHosts.Exist(host) || retainDataNodes.Exist(host) if dataRetained { continue } deployDir := spec.Abs(user, monitoredOptions.DeployDir) // prevent duplicate directories dataPaths := set.NewStringSet() logPaths := set.NewStringSet() tlsPath := set.NewStringSet() // data dir would be empty for components which don't need it dataDir := monitoredOptions.DataDir if c.cleanupData && len(dataDir) > 0 { // the default data_dir is relative to deploy_dir if !strings.HasPrefix(dataDir, "/") { dataDir = filepath.Join(deployDir, dataDir) } dataPaths.Insert(path.Join(dataDir, "*")) } // log dir will always be with values, but might not used by the component logDir := spec.Abs(user, monitoredOptions.LogDir) if c.cleanupLog && len(logDir) > 0 { logPaths.Insert(path.Join(logDir, "*.log")) } // clean tls data if c.cleanupTLS && !topo.BaseTopo().GlobalOptions.TLSEnabled { tlsDir := filepath.Join(deployDir, spec.TLSCertKeyDir) tlsPath.Insert(tlsDir) // ansible deploy if c.ansibleImport { ansibleTLSDir := filepath.Join(deployDir, spec.TLSCertKeyDirWithAnsible) tlsPath.Insert(ansibleTLSDir) } } if c.delFileMap[host] == nil { c.delFileMap[host] = set.NewStringSet() } c.delFileMap[host].Join(logPaths).Join(dataPaths).Join(tlsPath) } } tiup-1.16.3/pkg/cluster/manager/deploy.go000066400000000000000000000322031505422223000202550ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "errors" "fmt" "path/filepath" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // DeployOptions contains the options for scale out. type DeployOptions struct { User string // username to login to the SSH server SkipCreateUser bool // don't create the user IdentityFile string // path to the private key file UsePassword bool // use password instead of identity file for ssh connection NoLabels bool // don't check labels for TiKV instance Stage1 bool // don't start the new instance, just deploy Stage2 bool // start instances and init Config after stage1 } // DeployerInstance is a instance can deploy to a target deploy directory. type DeployerInstance interface { Deploy(b *task.Builder, srcPath string, deployDir string, version string, name string, clusterVersion string) } // Deploy a cluster. 
func (m *Manager) Deploy( name string, clusterVersion string, topoFile string, opt DeployOptions, afterDeploy func(b *task.Builder, newPart spec.Topology, gOpt operator.Options), skipConfirm bool, gOpt operator.Options, ) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } exist, err := m.specManager.Exist(name) if err != nil { return err } if exist { // FIXME: When change to use args, the suggestion text need to be updatem. return errDeployNameDuplicate. New("Cluster name '%s' is duplicated", name). WithProperty(tui.SuggestionFromFormat("Please specify another cluster name")) } metadata := m.specManager.NewMetadata() topo := metadata.GetTopology() if err := spec.ParseTopologyYaml(topoFile, topo); err != nil { return err } if err := checkTiFlashWithTLS(topo, clusterVersion); err != nil { return err } instCnt := 0 topo.IterInstance(func(inst spec.Instance) { switch inst.ComponentName() { // monitoring components are only useful when deployed with // core components, we do not support deploying any bare // monitoring system. 
case spec.ComponentGrafana, spec.ComponentPrometheus, spec.ComponentAlertmanager: return } instCnt++ }) if instCnt < 1 { return fmt.Errorf("no valid instance found in the input topology, please check your config") } spec.ExpandRelativeDir(topo) base := topo.BaseTopo() if sshType := gOpt.SSHType; sshType != "" { base.GlobalOptions.SSHType = sshType } if topo, ok := topo.(*spec.Specification); ok { topo.AdjustByVersion(clusterVersion) if !opt.NoLabels { // Check if TiKV's label set correctly lbs, err := topo.LocationLabels() if err != nil { return err } if err := spec.CheckTiKVLabels(lbs, topo); err != nil { return perrs.Errorf("check TiKV label failed, please fix that before continue:\n%s", err) } } } if err := checkConflict(m, name, topo); err != nil { return err } var ( sshConnProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} sshProxyProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} ) if gOpt.SSHType != executor.SSHTypeNone { var err error if sshConnProps, err = tui.ReadIdentityFileOrPassword(opt.IdentityFile, opt.UsePassword); err != nil { return err } if len(gOpt.SSHProxyHost) != 0 { if sshProxyProps, err = tui.ReadIdentityFileOrPassword(gOpt.SSHProxyIdentity, gOpt.SSHProxyUsePassword); err != nil { return err } } } var sudo bool systemdMode := topo.BaseTopo().GlobalOptions.SystemdMode if systemdMode == spec.UserMode { sudo = false hint := fmt.Sprintf("loginctl enable-linger %s", opt.User) msg := "The value of systemd_mode is set to `user` in the topology, please note that you'll need to manually execute the following command using root or sudo on the host(s) to enable lingering for the systemd user instance.\n" msg += color.GreenString(hint) msg += "\nYou can read the systemd documentation for reference: https://wiki.archlinux.org/title/Systemd/User#Automatic_start-up_of_systemd_user_instances." m.logger.Warnf("%s", msg) err = tui.PromptForConfirmOrAbortError("Do you want to continue? 
[y/N]: ") if err != nil { return err } } else { sudo = true } if err := m.fillHost(sshConnProps, sshProxyProps, topo, &gOpt, opt.User, opt.User != "root" && systemdMode != spec.UserMode); err != nil { return err } if !skipConfirm && strings.ToLower(gOpt.DisplayMode) != "json" { if err := m.confirmTopology(name, clusterVersion, topo, set.NewStringSet()); err != nil { return err } } if err := utils.MkdirAll(m.specManager.Path(name), 0755); err != nil { return errorx.InitializationFailed. Wrap(err, "Failed to create cluster metadata directory '%s'", m.specManager.Path(name)). WithProperty(tui.SuggestionFromString("Please check file system permissions and try again.")) } var ( envInitTasks []*task.StepDisplay // tasks which are used to initialize environment downloadCompTasks []*task.StepDisplay // tasks which are used to download components deployCompTasks []*task.StepDisplay // tasks which are used to copy components to remote host ) // Initialize environment globalOptions := base.GlobalOptions metadata.SetUser(globalOptions.User) metadata.SetVersion(clusterVersion) var iterErr error // error when itering over instances iterErr = nil topo.IterInstance(func(inst spec.Instance) { // check for "imported" parameter, it can not be true when deploying and scaling out // only for tidb now, need to support dm if inst.IsImported() && m.sysName == "tidb" { iterErr = errors.New( "'imported' is set to 'true' for new instance, this is only used " + "for instances imported from tidb-ansible and make no sense when " + "deploying new instances, please delete the line or set it to 'false' for new instances") return // skip the host to avoid issues } }) // generate CA and client cert for TLS enabled cluster _, err = m.genAndSaveCertificate(name, globalOptions) if err != nil { return err } uniqueHosts, noAgentHosts := getMonitorHosts(topo) for host, hostInfo := range uniqueHosts { var dirs []string for _, dir := range []string{globalOptions.DeployDir, globalOptions.LogDir} { if dir == 
"" { continue } dirs = append(dirs, spec.Abs(globalOptions.User, dir)) } // the default, relative path of data dir is under deploy dir if strings.HasPrefix(globalOptions.DataDir, "/") { dirs = append(dirs, globalOptions.DataDir) } if systemdMode == spec.UserMode { dirs = append(dirs, spec.Abs(globalOptions.User, ".config/systemd/user")) } t := task.NewBuilder(m.logger). RootSSH( host, hostInfo.ssh, opt.User, sshConnProps.Password, sshConnProps.IdentityFile, sshConnProps.IdentityFilePassphrase, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, sshProxyProps.Password, sshProxyProps.IdentityFile, sshProxyProps.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, globalOptions.SSHType, opt.User != "root" && systemdMode != spec.UserMode, ). EnvInit(host, globalOptions.User, globalOptions.Group, opt.SkipCreateUser || globalOptions.User == opt.User, sudo). Mkdir(globalOptions.User, host, sudo, dirs...). BuildAsStep(fmt.Sprintf(" - Prepare %s:%d", host, hostInfo.ssh)) envInitTasks = append(envInitTasks, t) } if iterErr != nil { return iterErr } // Download missing component downloadCompTasks = buildDownloadCompTasks(clusterVersion, topo, m.logger, gOpt) // Deploy components to remote topo.IterInstance(func(inst spec.Instance) { version := inst.CalculateVersion(clusterVersion) deployDir := spec.Abs(globalOptions.User, inst.DeployDir()) // data dir would be empty for components which don't need it dataDirs := spec.MultiDirAbs(globalOptions.User, inst.DataDir()) // log dir will always be with values, but might not used by the component logDir := spec.Abs(globalOptions.User, inst.LogDir()) // Deploy component // prepare deployment server deployDirs := []string{ deployDir, logDir, filepath.Join(deployDir, "bin"), filepath.Join(deployDir, "conf"), filepath.Join(deployDir, "scripts"), } t := task.NewSimpleUerSSH(m.logger, inst.GetManageHost(), inst.GetSSHPort(), globalOptions.User, gOpt, sshProxyProps, globalOptions.SSHType). 
Mkdir(globalOptions.User, inst.GetManageHost(), sudo, deployDirs...). Mkdir(globalOptions.User, inst.GetManageHost(), sudo, dataDirs...) if deployerInstance, ok := inst.(DeployerInstance); ok { deployerInstance.Deploy(t, "", deployDir, version, name, clusterVersion) } else { // copy dependency component if needed switch inst.ComponentName() { case spec.ComponentTiSpark: env := environment.GlobalEnv() var sparkVer utils.Version if sparkVer, _, iterErr = env.V1Repository().WithOptions(repository.Options{ GOOS: inst.OS(), GOARCH: inst.Arch(), }).LatestStableVersion(spec.ComponentSpark, false); iterErr != nil { return } t = t.DeploySpark(inst, sparkVer.String(), "" /* default srcPath */, deployDir) default: t = t.CopyComponent( inst.ComponentSource(), inst.OS(), inst.Arch(), version, "", // use default srcPath inst.GetManageHost(), deployDir, ) } } deployCompTasks = append(deployCompTasks, t.BuildAsStep(fmt.Sprintf(" - Copy %s -> %s", inst.ComponentName(), inst.GetManageHost())), ) }) if iterErr != nil { return iterErr } // generates certificate for instance and transfers it to the server certificateTasks, err := buildCertificateTasks(m, name, topo, metadata.GetBaseMeta(), gOpt, sshProxyProps) if err != nil { return err } sessionCertTasks, err := buildSessionCertTasks(m, name, nil, topo, metadata.GetBaseMeta(), gOpt, sshProxyProps) if err != nil { return err } certificateTasks = append(certificateTasks, sessionCertTasks...) refreshConfigTasks, _ := buildInitConfigTasks(m, name, topo, metadata.GetBaseMeta(), gOpt, nil) // Deploy monitor relevant components to remote dlTasks, dpTasks, err := buildMonitoredDeployTask( m, uniqueHosts, noAgentHosts, globalOptions, topo.GetMonitoredOptions(), gOpt, sshProxyProps, ) if err != nil { return err } downloadCompTasks = append(downloadCompTasks, dlTasks...) deployCompTasks = append(deployCompTasks, dpTasks...) 
// monitor tls file moniterCertificateTasks, err := buildMonitoredCertificateTasks( m, name, uniqueHosts, noAgentHosts, topo.BaseTopo().GlobalOptions, topo.GetMonitoredOptions(), gOpt, sshProxyProps, ) if err != nil { return err } certificateTasks = append(certificateTasks, moniterCertificateTasks...) monitorConfigTasks := buildInitMonitoredConfigTasks( m.specManager, name, uniqueHosts, noAgentHosts, *topo.BaseTopo().GlobalOptions, topo.GetMonitoredOptions(), m.logger, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt, sshProxyProps, ) builder := task.NewBuilder(m.logger). Step("+ Generate SSH keys", task.NewBuilder(m.logger). SSHKeyGen(m.specManager.Path(name, "ssh", "id_rsa")). Build(), m.logger). ParallelStep("+ Download TiDB components", false, downloadCompTasks...). ParallelStep("+ Initialize target host environments", false, envInitTasks...). ParallelStep("+ Deploy TiDB instance", false, deployCompTasks...). ParallelStep("+ Copy certificate to remote host", gOpt.Force, certificateTasks...). ParallelStep("+ Init instance configs", gOpt.Force, refreshConfigTasks...). ParallelStep("+ Init monitor configs", gOpt.Force, monitorConfigTasks...) if afterDeploy != nil { afterDeploy(builder, topo, gOpt) } t := builder.Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return err } err = m.specManager.SaveMeta(name, metadata) if err != nil { return err } var hint string if topo.Type() == spec.TopoTypeTiDB { hint = color.New(color.Bold).Sprintf("%s start %s --init", tui.OsArgs0(), name) } else { hint = color.New(color.Bold).Sprintf("%s start %s", tui.OsArgs0(), name) } m.logger.Infof("Cluster `%s` deployed successfully, you can start it with command: `%s`", name, hint) return nil } tiup-1.16.3/pkg/cluster/manager/destroy.go000066400000000000000000000137761505422223000204700ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "errors" "fmt" "time" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tui" ) // DestroyCluster destroy the cluster. 
func (m *Manager) DestroyCluster(name string, gOpt operator.Options, destroyOpt operator.Options, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) && !errors.Is(perrs.Cause(err), spec.ErrMultipleTiSparkMaster) && !errors.Is(perrs.Cause(err), spec.ErrMultipleTisparkWorker) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } if !skipConfirm { m.logger.Warnf("%s", color.HiRedString(tui.ASCIIArtWarning)) if err := tui.PromptForAnswerOrAbortError( "Yes, I know my cluster and data will be deleted.", "%s", fmt.Sprintf("This operation will destroy %s %s cluster %s and its data.", m.sysName, color.HiYellowString(base.Version), color.HiYellowString(name), )+"\nAre you sure to continue?", ); err != nil { return err } m.logger.Infof("Destroying cluster...") } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t := b. Func("StopCluster", func(ctx context.Context) error { return operator.Stop( ctx, topo, operator.Options{Force: destroyOpt.Force}, false, /* eviceLeader */ tlsCfg, ) }). Func("DestroyCluster", func(ctx context.Context) error { return operator.Destroy(ctx, topo, destroyOpt) }). Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return perrs.Trace(err) } if err := m.specManager.Remove(name); err != nil { return perrs.Trace(err) } m.logger.Infof("Destroyed cluster `%s` successfully", name) return nil } // DestroyTombstone destroy and remove instances that is in tombstone state func (m *Manager) DestroyTombstone( name string, gOpt operator.Options, skipConfirm bool, ) error { metadata, err := m.meta(name) // allow specific validation errors so that user can recover a broken // cluster if it is somehow in a bad state. if err != nil && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() clusterMeta := metadata.(*spec.ClusterMeta) cluster := clusterMeta.Topology if !operator.NeedCheckTombstone(cluster) { return nil } tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) nodes, err := operator.DestroyTombstone(ctx, cluster, true /* returnNodesOnly */, gOpt, tlsCfg) if err != nil { return err } t := b. Func("FindTomestoneNodes", func(ctx context.Context) (err error) { if !skipConfirm { err = tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("%s\nDo you confirm this action? [y/N]:", color.HiYellowString("Will destroy these nodes: %v", nodes)), ) if err != nil { return err } } m.logger.Infof("Start destroy Tombstone nodes: %v ...", nodes) return err }). ClusterOperate(cluster, operator.DestroyTombstoneOperation, gOpt, tlsCfg). UpdateMeta(name, clusterMeta, nodes). UpdateTopology(name, m.specManager.Path(name), clusterMeta, nodes). Build() if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return perrs.Trace(err) } // Destroy ignore error and force exec gOpt.IgnoreConfigCheck = true gOpt.Force = true // get new metadata metadata, err = m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return err } topo = metadata.GetTopology() base = metadata.GetBaseMeta() b, err = m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } regenConfigTasks, _ := buildInitConfigTasks(m, name, topo, base, gOpt, nodes) t = b. ParallelStep("+ Refresh instance configs", gOpt.Force, regenConfigTasks...). ParallelStep("+ Reload prometheus and grafana", gOpt.Force, buildReloadPromAndGrafanaTasks(topo, m.logger, gOpt)...). Func("RemoveTomestoneNodesInPD", func(ctx context.Context) (err error) { pdEndpoints := make([]string, 0) for _, pd := range cluster.PDServers { pdEndpoints = append(pdEndpoints, fmt.Sprintf("%s:%d", pd.Host, pd.ClientPort)) } pdAPI := api.NewPDClient(ctx, pdEndpoints, time.Second*time.Duration(gOpt.APITimeout), tlsCfg) return pdAPI.RemoveTombstone() }). Build() if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } m.logger.Infof("Destroy success") return nil } tiup-1.16.3/pkg/cluster/manager/display.go000066400000000000000000000613361505422223000204370ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "context" "crypto/tls" "encoding/json" "errors" "fmt" "math" "sort" "strconv" "strings" "sync" "time" "github.com/fatih/color" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/crypto" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // DisplayOption represents option of display command type DisplayOption struct { ClusterName string ShowUptime bool ShowProcess bool ShowManageHost bool ShowNuma bool ShowVersions bool } // InstInfo represents an instance info type InstInfo struct { ID string `json:"id"` Role string `json:"role"` Host string `json:"host"` ManageHost string `json:"manage_host"` Ports string `json:"ports"` OsArch string `json:"os_arch"` Status string `json:"status"` Memory string `json:"memory"` MemoryLimit string `json:"memory_limit"` CPUquota string `json:"cpu_quota"` Since string `json:"since"` DataDir string `json:"data_dir"` DeployDir string `json:"deploy_dir"` NumaNode string `json:"numa_node"` NumaCores string `json:"numa_cores"` Version string `json:"version"` ComponentName string Port int } // LabelInfo represents an instance label info type LabelInfo struct { Machine string `json:"machine"` Port string `json:"port"` Store string `json:"store"` Status string `json:"status"` Leaders string `json:"leaders"` Regions string `json:"regions"` Capacity string `json:"capacity"` Available string `json:"available"` Labels string `json:"labels"` } // ClusterMetaInfo hold the structure for the JSON output of the dashboard info type ClusterMetaInfo struct { 
ClusterType string `json:"cluster_type"` ClusterName string `json:"cluster_name"` ClusterVersion string `json:"cluster_version"` DeployUser string `json:"deploy_user"` SSHType string `json:"ssh_type"` TLSEnabled bool `json:"tls_enabled"` TLSCACert string `json:"tls_ca_cert,omitempty"` TLSClientCert string `json:"tls_client_cert,omitempty"` TLSClientKey string `json:"tls_client_key,omitempty"` DashboardURL string `json:"dashboard_url,omitempty"` DashboardURLS []string `json:"dashboard_urls,omitempty"` GrafanaURLS []string `json:"grafana_urls,omitempty"` } // JSONOutput holds the structure for the JSON output of `tiup cluster display --json` type JSONOutput struct { ClusterMetaInfo ClusterMetaInfo `json:"cluster_meta"` InstanceInfos []InstInfo `json:"instances,omitempty"` LocationLabel string `json:"location_label,omitempty"` LabelInfos []api.LabelInfo `json:"labels,omitempty"` } // Display cluster meta and topology. func (m *Manager) Display(dopt DisplayOption, opt operator.Options) error { name := dopt.ClusterName if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } clusterInstInfos, err := m.GetClusterTopology(dopt, opt) if err != nil { return err } metadata, _ := m.meta(name) topo := metadata.GetTopology() base := metadata.GetBaseMeta() cyan := color.New(color.FgCyan, color.Bold) // check if managehost is set if !dopt.ShowManageHost { topo.IterInstance(func(inst spec.Instance) { if inst.GetHost() != inst.GetManageHost() { dopt.ShowManageHost = true return } }) } statusTimeout := time.Duration(opt.APITimeout) * time.Second // display cluster meta var j *JSONOutput if m.logger.GetDisplayMode() == logprinter.DisplayModeJSON { j = &JSONOutput{ ClusterMetaInfo: ClusterMetaInfo{ m.sysName, name, base.Version, topo.BaseTopo().GlobalOptions.User, string(topo.BaseTopo().GlobalOptions.SSHType), topo.BaseTopo().GlobalOptions.TLSEnabled, "", // CA Cert "", // Client Cert "", // Client Key "", nil, nil, }, InstanceInfos: clusterInstInfos, } if 
topo.BaseTopo().GlobalOptions.TLSEnabled { j.ClusterMetaInfo.TLSCACert = m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCACert) j.ClusterMetaInfo.TLSClientKey = m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientKey) j.ClusterMetaInfo.TLSClientCert = m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientCert) } } else { fmt.Printf("Cluster type: %s\n", cyan.Sprint(m.sysName)) fmt.Printf("Cluster name: %s\n", cyan.Sprint(name)) fmt.Printf("Cluster version: %s\n", cyan.Sprint(base.Version)) fmt.Printf("Deploy user: %s\n", cyan.Sprint(topo.BaseTopo().GlobalOptions.User)) fmt.Printf("SSH type: %s\n", cyan.Sprint(topo.BaseTopo().GlobalOptions.SSHType)) // display TLS info if topo.BaseTopo().GlobalOptions.TLSEnabled { fmt.Printf("TLS encryption: %s\n", cyan.Sprint("enabled")) fmt.Printf("CA certificate: %s\n", cyan.Sprint( m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCACert), )) fmt.Printf("Client private key: %s\n", cyan.Sprint( m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientKey), )) fmt.Printf("Client certificate: %s\n", cyan.Sprint( m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientCert), )) } } // display topology var clusterTable [][]string rowHead := []string{"ID", "Role", "Host"} if dopt.ShowManageHost { rowHead = append(rowHead, "Manage Host") } rowHead = append(rowHead, "Ports", "OS/Arch", "Status") if dopt.ShowProcess { rowHead = append(rowHead, "Memory", "Memory Limit", "CPU Quota") } if dopt.ShowUptime { rowHead = append(rowHead, "Since") } if dopt.ShowNuma { rowHead = append(rowHead, "Numa Node", "Numa Cores") } if dopt.ShowVersions { rowHead = append(rowHead, "Version") } rowHead = append(rowHead, "Data Dir", "Deploy Dir") clusterTable = append(clusterTable, rowHead) masterActive := make([]string, 0) for _, v := range clusterInstInfos { row := []string{ color.CyanString(v.ID), v.Role, v.Host, } if dopt.ShowManageHost { row = append(row, v.ManageHost) } row = append(row, v.Ports, v.OsArch, 
formatInstanceStatus(v.Status)) if dopt.ShowProcess { row = append(row, v.Memory, v.MemoryLimit, v.CPUquota) } if dopt.ShowUptime { row = append(row, v.Since) } if dopt.ShowNuma { row = append(row, v.NumaNode, v.NumaCores) } if dopt.ShowVersions { row = append(row, v.Version) } row = append(row, v.DataDir, v.DeployDir) clusterTable = append(clusterTable, row) if v.ComponentName != spec.ComponentPD && v.ComponentName != spec.ComponentDMMaster { continue } if strings.HasPrefix(v.Status, "Up") || strings.HasPrefix(v.Status, "Healthy") { instAddr := utils.JoinHostPort(v.ManageHost, v.Port) masterActive = append(masterActive, instAddr) } } tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } ctx := ctxt.New( context.Background(), opt.Concurrency, m.logger, ) if t, ok := topo.(*spec.Specification); ok { _ = m.displayDashboards(ctx, t, j, statusTimeout, tlsCfg, "", masterActive...) } if m.logger.GetDisplayMode() == logprinter.DisplayModeJSON { grafanaURLs := getGrafanaURL(clusterInstInfos) if len(grafanaURLs) != 0 { j.ClusterMetaInfo.GrafanaURLS = grafanaURLs } } else { urls, exist := getGrafanaURLStr(clusterInstInfos) if exist { fmt.Printf("Grafana URL: %s\n", cyan.Sprintf("%s", urls)) } } if m.logger.GetDisplayMode() == logprinter.DisplayModeJSON { d, err := json.MarshalIndent(j, "", " ") if err != nil { return err } fmt.Println(string(d)) return nil } tui.PrintTable(clusterTable, true) fmt.Printf("Total nodes: %d\n", len(clusterTable)-1) if t, ok := topo.(*spec.Specification); ok { // Check if TiKV's label set correctly pdClient := api.NewPDClient( context.WithValue(ctx, logprinter.ContextKeyLogger, m.logger), masterActive, 10*time.Second, tlsCfg, ) if lbs, placementRule, err := pdClient.GetLocationLabels(); err != nil { m.logger.Debugf("get location labels from pd failed: %v", err) } else if !placementRule { if err := spec.CheckTiKVLabels(lbs, pdClient); err != nil { color.Yellow("\nWARN: there is something wrong 
with TiKV labels, which may cause data losing:\n%v", err) } } // Check if there is some instance in tombstone state nodes, _ := operator.DestroyTombstone(ctx, t, true /* returnNodesOnly */, opt, tlsCfg) if len(nodes) != 0 { color.Green("There are some nodes can be pruned: \n\tNodes: %+v\n\tYou can destroy them with the command: `tiup cluster prune %s`", nodes, name) } } return nil } func getGrafanaURL(clusterInstInfos []InstInfo) (result []string) { var grafanaURLs []string for _, instance := range clusterInstInfos { if instance.Role == "grafana" || instance.Role == "grafana (patched)" { grafanaURLs = append(grafanaURLs, "http://"+utils.JoinHostPort(instance.Host, instance.Port)) } } return grafanaURLs } func getGrafanaURLStr(clusterInstInfos []InstInfo) (result string, exist bool) { grafanaURLs := getGrafanaURL(clusterInstInfos) if len(grafanaURLs) == 0 { return "", false } return strings.Join(grafanaURLs, ","), true } // DisplayTiKVLabels display cluster tikv labels func (m *Manager) DisplayTiKVLabels(dopt DisplayOption, opt operator.Options) error { name := dopt.ClusterName if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } clusterInstInfos, err := m.GetClusterTopology(dopt, opt) if err != nil { return err } metadata, _ := m.meta(name) topo := metadata.GetTopology() base := metadata.GetBaseMeta() statusTimeout := time.Duration(opt.APITimeout) * time.Second // display cluster meta cyan := color.New(color.FgCyan, color.Bold) var j *JSONOutput if strings.ToLower(opt.DisplayMode) == "json" { j = &JSONOutput{ ClusterMetaInfo: ClusterMetaInfo{ m.sysName, name, base.Version, topo.BaseTopo().GlobalOptions.User, string(topo.BaseTopo().GlobalOptions.SSHType), topo.BaseTopo().GlobalOptions.TLSEnabled, "", // CA Cert "", // Client Cert "", // Client Key "", nil, nil, }, } if topo.BaseTopo().GlobalOptions.TLSEnabled { j.ClusterMetaInfo.TLSCACert = m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCACert) j.ClusterMetaInfo.TLSClientKey = 
m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientKey) j.ClusterMetaInfo.TLSClientCert = m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientCert) } } else { fmt.Printf("Cluster type: %s\n", cyan.Sprint(m.sysName)) fmt.Printf("Cluster name: %s\n", cyan.Sprint(name)) fmt.Printf("Cluster version: %s\n", cyan.Sprint(base.Version)) fmt.Printf("SSH type: %s\n", cyan.Sprint(topo.BaseTopo().GlobalOptions.SSHType)) fmt.Printf("Component name: %s\n", cyan.Sprint("TiKV")) // display TLS info if topo.BaseTopo().GlobalOptions.TLSEnabled { fmt.Printf("TLS encryption: %s\n", cyan.Sprint("enabled")) fmt.Printf("CA certificate: %s\n", cyan.Sprint( m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSCACert), )) fmt.Printf("Client private key: %s\n", cyan.Sprint( m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientKey), )) fmt.Printf("Client certificate: %s\n", cyan.Sprint( m.specManager.Path(name, spec.TLSCertKeyDir, spec.TLSClientCert), )) } } // display topology var clusterTable [][]string clusterTable = append(clusterTable, []string{"Machine", "Port", "Store", "Status", "Leaders", "Regions", "Capacity", "Available", "Labels"}) masterActive := make([]string, 0) tikvStoreIP := make(map[string]struct{}) for _, v := range clusterInstInfos { if v.ComponentName == spec.ComponentTiKV { tikvStoreIP[v.Host] = struct{}{} } } ctx := ctxt.New( context.Background(), opt.Concurrency, m.logger, ) masterList := topo.BaseTopo().MasterList tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } var mu sync.Mutex topo.IterInstance(func(ins spec.Instance) { if ins.ComponentName() == spec.ComponentPD { status := ins.Status(ctx, statusTimeout, tlsCfg, masterList...) 
if strings.HasPrefix(status, "Up") || strings.HasPrefix(status, "Healthy") { instAddr := utils.JoinHostPort(ins.GetManageHost(), ins.GetPort()) mu.Lock() masterActive = append(masterActive, instAddr) mu.Unlock() } } }, opt.Concurrency) var ( labelInfoArr []api.LabelInfo locationLabel []string ) if _, ok := topo.(*spec.Specification); ok { // Check if TiKV's label set correctly pdClient := api.NewPDClient(ctx, masterActive, 10*time.Second, tlsCfg) // No locationLabel, _, err = pdClient.GetLocationLabels() if err != nil { m.logger.Debugf("get location labels from pd failed: %v", err) } _, storeInfos, err := pdClient.GetTiKVLabels() if err != nil { m.logger.Debugf("get tikv state and labels from pd failed: %v", err) } for storeIP := range tikvStoreIP { row := []string{ color.CyanString(storeIP), "", "", "", "", "", "", "", "", } clusterTable = append(clusterTable, row) for _, val := range storeInfos { if store, ok := val[storeIP]; ok { row := []string{ "", store.Port, strconv.FormatUint(store.Store, 10), color.CyanString(store.Status), fmt.Sprintf("%v", store.Leaders), fmt.Sprintf("%v", store.Regions), store.Capacity, store.Available, store.Labels, } clusterTable = append(clusterTable, row) labelInfoArr = append(labelInfoArr, store) } } } } if strings.ToLower(opt.DisplayMode) == "json" { j.LocationLabel = strings.Join(locationLabel, ",") j.LabelInfos = labelInfoArr d, err := json.MarshalIndent(j, "", " ") if err != nil { return err } fmt.Println(string(d)) return nil } fmt.Printf("Location labels: %s\n", cyan.Sprint(strings.Join(locationLabel, ","))) tui.PrintTable(clusterTable, true) fmt.Printf("Total nodes: %d\n", len(clusterTable)-1) return nil } // GetClusterTopology get the topology of the cluster. 
func (m *Manager) GetClusterTopology(dopt DisplayOption, opt operator.Options) ([]InstInfo, error) { ctx := ctxt.New( context.Background(), opt.Concurrency, m.logger, ) name := dopt.ClusterName metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return nil, err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() statusTimeout := time.Duration(opt.APITimeout) * time.Second err = SetSSHKeySet(ctx, m.specManager.Path(name, "ssh", "id_rsa"), m.specManager.Path(name, "ssh", "id_rsa.pub")) if err != nil { return nil, err } err = SetClusterSSH(ctx, topo, base.User, opt.SSHTimeout, opt.SSHType, topo.BaseTopo().GlobalOptions.SSHType) if err != nil { return nil, err } filterRoles := set.NewStringSet(opt.Roles...) filterNodes := set.NewStringSet(opt.Nodes...) masterList := topo.BaseTopo().MasterList tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return nil, err } masterActive := make([]string, 0) masterStatus := make(map[string]string) var mu sync.Mutex topo.IterInstance(func(ins spec.Instance) { if ins.ComponentName() != spec.ComponentPD && ins.ComponentName() != spec.ComponentDMMaster { return } status := ins.Status(ctx, statusTimeout, tlsCfg, masterList...) mu.Lock() if strings.HasPrefix(status, "Up") || strings.HasPrefix(status, "Healthy") { instAddr := utils.JoinHostPort(ins.GetManageHost(), ins.GetPort()) masterActive = append(masterActive, instAddr) } masterStatus[ins.ID()] = status mu.Unlock() }, opt.Concurrency) var dashboardAddr string if t, ok := topo.(*spec.Specification); ok { dashboardAddr, _ = t.GetPDDashboardAddress(ctx, tlsCfg, statusTimeout, masterActive...) 
} clusterInstInfos := []InstInfo{} systemdMode := string(topo.BaseTopo().GlobalOptions.SystemdMode) topo.IterInstance(func(ins spec.Instance) { // apply role filter if len(filterRoles) > 0 && !filterRoles.Exist(ins.Role()) { return } // apply node filter if len(filterNodes) > 0 && !filterNodes.Exist(ins.ID()) { return } dataDir := "-" insDirs := ins.UsedDirs() deployDir := insDirs[0] if len(insDirs) > 1 { dataDir = insDirs[1] } var status, memory string switch ins.ComponentName() { case spec.ComponentPD: status = masterStatus[ins.ID()] instAddr := utils.JoinHostPort(ins.GetManageHost(), ins.GetPort()) if dashboardAddr == instAddr { status += "|UI" } case spec.ComponentDMMaster: status = masterStatus[ins.ID()] default: status = ins.Status(ctx, statusTimeout, tlsCfg, masterActive...) } since := "-" if dopt.ShowUptime { since = formatInstanceSince(ins.Uptime(ctx, statusTimeout, tlsCfg)) } // Query the service status and uptime if status == "-" || (dopt.ShowUptime && since == "-") || dopt.ShowProcess { e, found := ctxt.GetInner(ctx).GetExecutor(ins.GetManageHost()) if found { var active string var systemdSince time.Duration nctx := checkpoint.NewContext(ctx) active, memory, systemdSince, _ = operator.GetServiceStatus(nctx, e, ins.ServiceName(), systemdMode, systemdMode) if status == "-" { if active == "active" { status = "Up" } else { status = active } } if dopt.ShowUptime && since == "-" { since = formatInstanceSince(systemdSince) } } } // check if the role is patched roleName := ins.Role() // get extended name for TiFlash to distinguish disaggregated mode. if ins.ComponentName() == spec.ComponentTiFlash { tiflashInstance := ins.(*spec.TiFlashInstance) tiflashSpec := tiflashInstance.InstanceSpec.(*spec.TiFlashSpec) roleName += tiflashSpec.GetExtendedRole(ctx, tlsCfg, masterActive...) 
} if ins.IsPatched() { roleName += " (patched)" } rc := ins.ResourceControl() mu.Lock() clusterInstInfos = append(clusterInstInfos, InstInfo{ ID: ins.ID(), Role: roleName, Host: ins.GetHost(), ManageHost: ins.GetManageHost(), Ports: utils.JoinInt(ins.UsedPorts(), "/"), OsArch: tui.OsArch(ins.OS(), ins.Arch()), Status: status, Memory: utils.Ternary(memory == "", "-", memory).(string), MemoryLimit: utils.Ternary(rc.MemoryLimit == "", "-", rc.MemoryLimit).(string), CPUquota: utils.Ternary(rc.CPUQuota == "", "-", rc.CPUQuota).(string), DataDir: dataDir, DeployDir: deployDir, ComponentName: ins.ComponentName(), Port: ins.GetPort(), Since: since, NumaNode: utils.Ternary(ins.GetNumaNode() == "", "-", ins.GetNumaNode()).(string), NumaCores: utils.Ternary(ins.GetNumaCores() == "", "-", ins.GetNumaCores()).(string), Version: ins.CalculateVersion(base.Version), }) mu.Unlock() }, opt.Concurrency) // Sort by role,host,ports sort.Slice(clusterInstInfos, func(i, j int) bool { lhs, rhs := clusterInstInfos[i], clusterInstInfos[j] if lhs.Role != rhs.Role { return lhs.Role < rhs.Role } if lhs.Host != rhs.Host { return lhs.Host < rhs.Host } return lhs.Ports < rhs.Ports }) return clusterInstInfos, nil } func formatInstanceStatus(status string) string { lowercaseStatus := strings.ToLower(status) startsWith := func(prefixs ...string) bool { for _, prefix := range prefixs { if strings.HasPrefix(lowercaseStatus, prefix) { return true } } return false } switch { case startsWith("up|l", "healthy|l"): // up|l, up|l|ui, healthy|l return color.HiGreenString(status) case startsWith("up", "healthy", "free"): return color.GreenString(status) case startsWith("down", "err", "inactive"): // down, down|ui return color.RedString(status) case startsWith("tombstone", "disconnected", "n/a"), strings.Contains(strings.ToLower(status), "offline"): return color.YellowString(status) default: return status } } func formatInstanceSince(uptime time.Duration) string { if uptime == 0 { return "-" } d := 
int64(uptime.Hours() / 24) h := int64(math.Mod(uptime.Hours(), 24)) m := int64(math.Mod(uptime.Minutes(), 60)) s := int64(math.Mod(uptime.Seconds(), 60)) chunks := []struct { unit string value int64 }{ {"d", d}, {"h", h}, {"m", m}, {"s", s}, } parts := []string{} for _, chunk := range chunks { switch chunk.value { case 0: continue default: parts = append(parts, fmt.Sprintf("%d%s", chunk.value, chunk.unit)) } } return strings.Join(parts, "") } // SetSSHKeySet set ssh key set. func SetSSHKeySet(ctx context.Context, privateKeyPath string, publicKeyPath string) error { ctxt.GetInner(ctx).PrivateKeyPath = privateKeyPath ctxt.GetInner(ctx).PublicKeyPath = publicKeyPath return nil } // SetClusterSSH set cluster user ssh executor in context. func SetClusterSSH(ctx context.Context, topo spec.Topology, deployUser string, sshTimeout uint64, sshType, defaultSSHType executor.SSHType) error { if sshType == "" { sshType = defaultSSHType } if len(ctxt.GetInner(ctx).PrivateKeyPath) == 0 { return perrs.Errorf("context has no PrivateKeyPath") } for _, com := range topo.ComponentsByStartOrder() { for _, in := range com.Instances() { cf := executor.SSHConfig{ Host: in.GetManageHost(), Port: in.GetSSHPort(), KeyFile: ctxt.GetInner(ctx).PrivateKeyPath, User: deployUser, Timeout: time.Second * time.Duration(sshTimeout), } e, err := executor.New(sshType, false, cf) if err != nil { return err } ctxt.GetInner(ctx).SetExecutor(in.GetManageHost(), e) } } return nil } // DisplayDashboardInfo prints the dashboard address of cluster func (m *Manager) DisplayDashboardInfo(clusterName string, timeout time.Duration, tlsCfg *tls.Config) error { metadata, err := spec.ClusterMetadata(clusterName) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return err } ctx := context.WithValue(context.Background(), logprinter.ContextKeyLogger, m.logger) return m.displayDashboards(ctx, metadata.Topology, nil, timeout, tlsCfg, clusterName, 
metadata.Topology.GetPDListWithManageHost()...) } func (m *Manager) displayDashboards(ctx context.Context, t *spec.Specification, j *JSONOutput, timeout time.Duration, tlsCfg *tls.Config, clusterName string, pdList ...string) error { dashboardAddrs := []string{} t.IterInstance(func(ins spec.Instance) { if ins.Role() != spec.ComponentDashboard { return } dashboardAddrs = append(dashboardAddrs, utils.JoinHostPort(ins.GetManageHost(), ins.GetPort())) }) pdDashboardAddr, err := t.GetPDDashboardAddress(ctx, tlsCfg, timeout, pdList...) if err == nil && !set.NewStringSet("", "auto", "none").Exist(pdDashboardAddr) { dashboardAddrs = append(dashboardAddrs, pdDashboardAddr) } if len(dashboardAddrs) == 0 { return fmt.Errorf("TiDB Dashboard is missing, try again later") } if clusterName != "" && tlsCfg != nil { fmt.Println( "Client certificate:", color.CyanString(m.specManager.Path(clusterName, spec.TLSCertKeyDir, spec.PFXClientCert)), ) fmt.Println( "Certificate password:", color.CyanString(crypto.PKCS12Password), ) } for i, addr := range dashboardAddrs { scheme := "http" // show the original info if addr == pdDashboardAddr { if tlsCfg != nil { scheme = "https" } if m.logger.GetDisplayMode() == logprinter.DisplayModeJSON && j != nil { j.ClusterMetaInfo.DashboardURL = fmt.Sprintf("%s://%s/dashboard", scheme, addr) } else { fmt.Printf("Dashboard URL: %s\n", color.CyanString("%s://%s/dashboard", scheme, addr)) } } if m.logger.GetDisplayMode() == logprinter.DisplayModeJSON && j != nil { j.ClusterMetaInfo.DashboardURLS = append(j.ClusterMetaInfo.DashboardURLS, fmt.Sprintf("%s://%s/dashboard", scheme, addr)) } else { dashboardAddrs[i] = color.CyanString("%s://%s/dashboard", scheme, addr) } } if m.logger.GetDisplayMode() != logprinter.DisplayModeJSON || j == nil { fmt.Printf("Dashboard URLs: %s\n", strings.Join(dashboardAddrs, ",")) } return nil } tiup-1.16.3/pkg/cluster/manager/display_test.go000066400000000000000000000025761505422223000214770ustar00rootroot00000000000000// 
Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "testing" "github.com/stretchr/testify/assert" ) func TestGetGrafanaURLStr(t *testing.T) { var str string var exist bool str, exist = getGrafanaURLStr([]InstInfo{ { Role: "grafana", Port: 3000, Host: "127.0.0.1", }, { Role: "others", Port: 3000, Host: "127.0.0.1", }, }) assert.Equal(t, exist, true) assert.Equal(t, "http://127.0.0.1:3000", str) str, exist = getGrafanaURLStr([]InstInfo{ { Role: "grafana", Port: 3000, Host: "127.0.0.1", }, { Role: "grafana", Port: 3000, Host: "127.0.0.2", }, }) assert.Equal(t, exist, true) assert.Equal(t, "http://127.0.0.1:3000,http://127.0.0.2:3000", str) _, exist = getGrafanaURLStr([]InstInfo{ { Role: "others", Port: 3000, Host: "127.0.0.1", }, { Role: "others", Port: 3000, Host: "127.0.0.2", }, }) assert.Equal(t, exist, false) } tiup-1.16.3/pkg/cluster/manager/edit_config.go000066400000000000000000000106051505422223000212350ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "bytes" "errors" "fmt" "io" "os" "github.com/fatih/color" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "gopkg.in/yaml.v3" ) // EditConfigOptions contains the options for config edition. type EditConfigOptions struct { NewTopoFile string // path to new topology file to substitute the original one } // EditConfig lets the user edit the cluster's config. func (m *Manager) EditConfig(name string, opt EditConfigOptions, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } topo := metadata.GetTopology() data, err := yaml.Marshal(topo) if err != nil { return perrs.AddStack(err) } newTopo, err := m.editTopo(topo, data, opt, skipConfirm) if err != nil { return err } if newTopo == nil { return nil } m.logger.Infof("Applying changes...") metadata.SetTopology(newTopo) err = m.specManager.SaveMeta(name, metadata) if err != nil { return perrs.Annotate(err, "failed to save meta") } m.logger.Infof("Applied successfully, please use `%s reload %s [-N ] [-R ]` to reload config.", tui.OsArgs0(), name) return nil } // If the flag --topology-file is specified, the first 2 steps will be skipped. // 1. Write Topology to a temporary file. // 2. Open file in editor. // 3. Check and update Topology. // 4. Save meta file. 
func (m *Manager) editTopo(origTopo spec.Topology, data []byte, opt EditConfigOptions, skipConfirm bool) (spec.Topology, error) { var name string if opt.NewTopoFile == "" { file, err := os.CreateTemp(os.TempDir(), "*") if err != nil { return nil, perrs.AddStack(err) } name = file.Name() _, err = io.Copy(file, bytes.NewReader(data)) if err != nil { return nil, perrs.AddStack(err) } err = file.Close() if err != nil { return nil, perrs.AddStack(err) } err = utils.OpenFileInEditor(name) if err != nil { return nil, err } } else { name = opt.NewTopoFile } // Now user finish editing the file or user has provided the new topology file newData, err := os.ReadFile(name) if err != nil { return nil, perrs.AddStack(err) } newTopo := m.specManager.NewMetadata().GetTopology() decoder := yaml.NewDecoder(bytes.NewReader(newData)) decoder.KnownFields(true) err = decoder.Decode(newTopo) if err != nil { fmt.Print(color.RedString("New topology could not be saved: ")) m.logger.Infof("Failed to parse topology file: %v", err) if opt.NewTopoFile == "" { if pass, _ := tui.PromptForConfirmNo("Do you want to continue editing? [Y/n]: "); !pass { return m.editTopo(origTopo, newData, opt, skipConfirm) } } m.logger.Infof("Nothing changed.") return nil, nil } // report error if immutable field has been changed if err := utils.ValidateSpecDiff(origTopo, newTopo); err != nil { fmt.Print(color.RedString("New topology could not be saved: ")) m.logger.Errorf("%s", err) if opt.NewTopoFile == "" { if pass, _ := tui.PromptForConfirmNo("Do you want to continue editing? 
[Y/n]: "); !pass { return m.editTopo(origTopo, newData, opt, skipConfirm) } } m.logger.Infof("Nothing changed.") return nil, nil } origData, err := yaml.Marshal(origTopo) if err != nil { return nil, perrs.AddStack(err) } if bytes.Equal(origData, newData) { m.logger.Infof("The file has nothing changed") return nil, nil } utils.ShowDiff(string(origData), string(newData), os.Stdout) if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "%s", color.HiYellowString("Please check change highlight above, do you want to apply the change? [y/N]:"), ); err != nil { return nil, err } } return newTopo, nil } tiup-1.16.3/pkg/cluster/manager/exec.go000066400000000000000000000066631505422223000177200ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" ) // ExecOptions for exec shell commanm. type ExecOptions struct { Command string Sudo bool } // Exec shell command on host in the tidb cluster. 
func (m *Manager) Exec(name string, opt ExecOptions, gOpt operator.Options) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } metadata, err := m.meta(name) if err != nil { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() filterRoles := set.NewStringSet(gOpt.Roles...) filterNodes := set.NewStringSet(gOpt.Nodes...) var shellTasks []task.Task uniqueHosts := map[string]set.StringSet{} // host-sshPort -> {command} topo.IterInstance(func(inst spec.Instance) { key := utils.JoinHostPort(inst.GetManageHost(), inst.GetSSHPort()) if _, found := uniqueHosts[key]; !found { if len(gOpt.Roles) > 0 && !filterRoles.Exist(inst.Role()) { return } if len(gOpt.Nodes) > 0 && (!filterNodes.Exist(inst.GetHost()) && !filterNodes.Exist(inst.GetManageHost())) { return } cmds, err := renderInstanceSpec(opt.Command, inst) if err != nil { m.logger.Debugf("error rendering command with spec: %s", err) return // skip } cmdSet := set.NewStringSet(cmds...) if _, ok := uniqueHosts[key]; ok { uniqueHosts[key].Join(cmdSet) return } uniqueHosts[key] = cmdSet } }) for hostKey, i := range uniqueHosts { host, _ := utils.ParseHostPort(hostKey) for _, cmd := range i.Slice() { shellTasks = append(shellTasks, task.NewBuilder(m.logger). Shell(host, cmd, hostKey+cmd, opt.Sudo). Build()) } } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t := b. Parallel(false, shellTasks...). Build() execCtx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(execCtx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return perrs.Trace(err) } // print outputs for hostKey, i := range uniqueHosts { host, _ := utils.ParseHostPort(hostKey) for _, cmd := range i.Slice() { stdout, stderr, ok := ctxt.GetInner(execCtx).GetOutputs(hostKey + cmd) if !ok { continue } m.logger.Infof("Outputs of %s on %s:", color.CyanString(cmd), color.CyanString(host)) if len(stdout) > 0 { m.logger.Infof("%s:\n%s", color.GreenString("stdout"), stdout) } if len(stderr) > 0 { m.logger.Infof("%s:\n%s", color.RedString("stderr"), stderr) } } } return nil } tiup-1.16.3/pkg/cluster/manager/list.go000066400000000000000000000046441505422223000177440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "encoding/json" "errors" "fmt" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tui" ) // Cluster represents a clsuter type Cluster struct { Name string `json:"name"` User string `json:"user"` Version string `json:"version"` Path string `json:"path"` PrivateKey string `json:"private_key"` } // ListCluster list the clusters. 
func (m *Manager) ListCluster() error { clusters, err := m.GetClusterList() if err != nil { return err } switch m.logger.GetDisplayMode() { case logprinter.DisplayModeJSON: clusterObj := struct { Clusters []Cluster `json:"clusters"` }{ Clusters: clusters, } data, err := json.Marshal(clusterObj) if err != nil { return err } fmt.Println(string(data)) default: clusterTable := [][]string{ // Header {"Name", "User", "Version", "Path", "PrivateKey"}, } for _, v := range clusters { clusterTable = append(clusterTable, []string{ v.Name, v.User, v.Version, v.Path, v.PrivateKey, }) } tui.PrintTable(clusterTable, true) } return nil } // GetClusterList get the clusters list. func (m *Manager) GetClusterList() ([]Cluster, error) { names, err := m.specManager.List() if err != nil { return nil, err } var clusters = []Cluster{} for _, name := range names { metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return nil, perrs.Trace(err) } base := metadata.GetBaseMeta() clusters = append(clusters, Cluster{ Name: name, User: base.User, Version: base.Version, Path: m.specManager.Path(name), PrivateKey: m.specManager.Path(name, "ssh", "id_rsa"), }) } return clusters, nil } tiup-1.16.3/pkg/cluster/manager/manager.go000066400000000000000000000173121505422223000203770ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "context" "fmt" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) var ( errNSDeploy = errorx.NewNamespace("deploy") errDeployNameDuplicate = errNSDeploy.NewType("name_dup", utils.ErrTraitPreCheck) errNSRename = errorx.NewNamespace("rename") errorRenameNameNotExist = errNSRename.NewType("name_not_exist", utils.ErrTraitPreCheck) errorRenameNameDuplicate = errNSRename.NewType("name_dup", utils.ErrTraitPreCheck) ) // Manager to deploy a cluster. type Manager struct { sysName string specManager *spec.SpecManager logger *logprinter.Logger } // NewManager create a Manager. 
func NewManager( sysName string, specManager *spec.SpecManager, logger *logprinter.Logger, ) *Manager { return &Manager{ sysName: sysName, specManager: specManager, logger: logger, } } func (m *Manager) meta(name string) (metadata spec.Metadata, err error) { exist, err := m.specManager.Exist(name) if err != nil { return nil, err } if !exist { return nil, perrs.Errorf("%s cluster `%s` not exists", m.sysName, name) } metadata = m.specManager.NewMetadata() err = m.specManager.Metadata(name, metadata) if err != nil { return metadata, err } return metadata, nil } func (m *Manager) confirmTopology(name, version string, topo spec.Topology, patchedRoles set.StringSet) error { m.logger.Infof("Please confirm your topology:") cyan := color.New(color.FgCyan, color.Bold) fmt.Printf("Cluster type: %s\n", cyan.Sprint(m.sysName)) fmt.Printf("Cluster name: %s\n", cyan.Sprint(name)) fmt.Printf("Cluster version: %s\n", cyan.Sprint(version)) if topo.BaseTopo().GlobalOptions.TLSEnabled { fmt.Printf("TLS encryption: %s\n", cyan.Sprint("enabled")) } // check if managehost is set manageHost := false topo.IterInstance(func(inst spec.Instance) { if inst.GetHost() != inst.GetManageHost() { manageHost = true return } }) clusterTable := [][]string{ // Header {"Role", "Host"}, } if manageHost { clusterTable[0] = append(clusterTable[0], "Manage Host") } clusterTable[0] = append(clusterTable[0], "Ports", "OS/Arch", "Directories") topo.IterInstance(func(instance spec.Instance) { comp := instance.ComponentName() if patchedRoles.Exist(comp) || instance.IsPatched() { comp += " (patched)" } instInfo := []string{comp, instance.GetHost()} if manageHost { instInfo = append(instInfo, instance.GetManageHost()) } instInfo = append(instInfo, utils.JoinInt(instance.UsedPorts(), "/"), tui.OsArch(instance.OS(), instance.Arch()), strings.Join(instance.UsedDirs(), ",")) clusterTable = append(clusterTable, instInfo) }) tui.PrintTable(clusterTable, true) m.logger.Warnf("Attention:") m.logger.Warnf(" 1. 
If the topology is not what you expected, check your yaml file.") m.logger.Warnf(" 2. Please confirm there is no port/directory conflicts in same host.") if len(patchedRoles) != 0 { m.logger.Errorf(" 3. The component marked as `patched` has been replaced by previous patch commanm.") } if spec, ok := topo.(*spec.Specification); ok { if len(spec.TiSparkMasters) > 0 || len(spec.TiSparkWorkers) > 0 { cyan := color.New(color.FgCyan, color.Bold) msg := cyan.Sprint(`There are TiSpark nodes defined in the topology, please note that you'll need to manually install Java Runtime Environment (JRE) 8 on the host, otherwise the TiSpark nodes will fail to start. You may read the OpenJDK doc for a reference: https://openjdk.java.net/install/ `) m.logger.Warnf("%s", msg) } } return tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]: ") } func (m *Manager) sshTaskBuilder(name string, topo spec.Topology, user string, gOpt operator.Options) (*task.Builder, error) { var p *tui.SSHConnectionProps = &tui.SSHConnectionProps{} if gOpt.SSHType != executor.SSHTypeNone && len(gOpt.SSHProxyHost) != 0 { var err error if p, err = tui.ReadIdentityFileOrPassword(gOpt.SSHProxyIdentity, gOpt.SSHProxyUsePassword); err != nil { return nil, err } } return task.NewBuilder(m.logger). SSHKeySet( m.specManager.Path(name, "ssh", "id_rsa"), m.specManager.Path(name, "ssh", "id_rsa.pub"), ). 
ClusterSSH( topo, user, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, p.Password, p.IdentityFile, p.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, topo.BaseTopo().GlobalOptions.SSHType, ), nil } // fillHost full host cpu-arch and kernel-name func (m *Manager) fillHost(s, p *tui.SSHConnectionProps, topo spec.Topology, gOpt *operator.Options, user string, sudo bool) error { if err := m.fillHostArchOrOS(s, p, topo, gOpt, user, spec.FullArchType, sudo); err != nil { return err } return m.fillHostArchOrOS(s, p, topo, gOpt, user, spec.FullOSType, sudo) } // fillHostArchOrOS full host cpu-arch or kernel-name func (m *Manager) fillHostArchOrOS(s, p *tui.SSHConnectionProps, topo spec.Topology, gOpt *operator.Options, user string, fullType spec.FullHostType, sudo bool) error { globalSSHType := topo.BaseTopo().GlobalOptions.SSHType hostArchOrOS := map[string]string{} var detectTasks []*task.StepDisplay topo.IterInstance(func(inst spec.Instance) { if fullType == spec.FullOSType { if inst.OS() != "" { return } } else if inst.Arch() != "" { return } if _, ok := hostArchOrOS[inst.GetManageHost()]; ok { return } hostArchOrOS[inst.GetManageHost()] = "" tf := task.NewBuilder(m.logger). 
RootSSH( inst.GetManageHost(), inst.GetSSHPort(), user, s.Password, s.IdentityFile, s.IdentityFilePassphrase, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, p.Password, p.IdentityFile, p.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, globalSSHType, sudo, ) switch fullType { case spec.FullOSType: tf = tf.Shell(inst.GetManageHost(), "uname -s", "", false) default: tf = tf.Shell(inst.GetManageHost(), "uname -m", "", false) } detectTasks = append(detectTasks, tf.BuildAsStep(fmt.Sprintf(" - Detecting node %s %s info", inst.GetManageHost(), string(fullType)))) }) if len(detectTasks) == 0 { return nil } ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) t := task.NewBuilder(m.logger). ParallelStep(fmt.Sprintf("+ Detect CPU %s Name", string(fullType)), false, detectTasks...). Build() if err := t.Execute(ctx); err != nil { return perrs.Annotate(err, "failed to fetch cpu-arch or kernel-name") } for host := range hostArchOrOS { stdout, _, ok := ctxt.GetInner(ctx).GetOutputs(host) if !ok { return fmt.Errorf("no check results found for %s", host) } hostArchOrOS[host] = strings.Trim(string(stdout), "\n") } return topo.FillHostArchOrOS(hostArchOrOS, fullType) } tiup-1.16.3/pkg/cluster/manager/manager_test.go000066400000000000000000000052561505422223000214420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "testing" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestVersionCompare(t *testing.T) { var err error err = versionCompare("v4.0.0", "v4.0.1") assert.Nil(t, err) err = versionCompare("v4.0.1", "v4.0.0") assert.NotNil(t, err) err = versionCompare("v4.0.0", "nightly") assert.Nil(t, err) err = versionCompare("nightly", "nightly") assert.Nil(t, err) } func TestValidateNewTopo(t *testing.T) { topo := spec.Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tidb_servers: - host: 172.16.5.138 deploy_dir: "tidb-deploy" pd_servers: - host: 172.16.5.53 data_dir: "pd-data" `), &topo) assert := require.New(t) assert.Nil(err) err = validateNewTopo(&topo) assert.Nil(err) topo = spec.Specification{} err = yaml.Unmarshal([]byte(` tidb_servers: - host: 172.16.5.138 imported: true deploy_dir: "tidb-deploy" pd_servers: - host: 172.16.5.53 data_dir: "pd-data" `), &topo) assert.Nil(err) err = validateNewTopo(&topo) assert.NotNil(err) topo = spec.Specification{} err = yaml.Unmarshal([]byte(` global: user: "test3" deploy_dir: "test-deploy" data_dir: "test-data" pd_servers: - host: 172.16.5.53 imported: true `), &topo) assert.Nil(err) err = validateNewTopo(&topo) assert.NotNil(err) topo = spec.Specification{} err = yaml.Unmarshal([]byte(` global: user: "test4" deploy_dir: "test-deploy" data_dir: "test-data" tso_servers: - host: 172.16.5.53 scheduling_servers: - host: 172.16.5.54 `), &topo) assert.Nil(err) err = validateNewTopo(&topo) assert.Nil(err) } func TestDeduplicateCheckResult(t *testing.T) { checkResults := []HostCheckResult{} for i := 0; i <= 10; i++ { checkResults = append(checkResults, HostCheckResult{ Node: "127.0.0.1", Status: "Warn", Name: "disk", Message: "mount point /home does not have 'noatime' option set", }, ) } checkResults = deduplicateCheckResult(checkResults) if 
len(checkResults) != 1 { t.Errorf("Deduplicate Check Result Failed") } } tiup-1.16.3/pkg/cluster/manager/patch.go000066400000000000000000000153131505422223000200630ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "fmt" "os" "os/exec" "path" "path/filepath" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // Patch the cluster. func (m *Manager) Patch(name string, packagePath string, opt operator.Options, overwrite, offline, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { if !offline { return errorx.Cast(err). 
WithProperty(tui.SuggestionFromString("Please run tiup-cluster patch --offline to try again")) } } metadata, err := m.meta(name) if err != nil { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() if !utils.IsExist(packagePath) { return perrs.Errorf("specified package(%s) not exists", packagePath) } if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("Will patch the cluster %s with package path is %s, nodes: %s, roles: %s.\nDo you want to continue? [y/N]:", color.HiYellowString(name), color.HiYellowString(packagePath), color.HiRedString(strings.Join(opt.Nodes, ",")), color.HiRedString(strings.Join(opt.Roles, ",")), ), ); err != nil { return err } } insts, err := instancesToPatch(topo, opt) if err != nil { return err } if err := checkPackage(m.specManager, name, insts[0], insts[0].OS(), insts[0].Arch(), packagePath); err != nil { return err } var replacePackageTasks []task.Task for _, inst := range insts { deployDir := spec.Abs(base.User, inst.DeployDir()) tb := task.NewBuilder(m.logger) tb.BackupComponent(inst.ComponentName(), base.Version, inst.GetManageHost(), deployDir). InstallPackage(packagePath, inst.GetManageHost(), deployDir) replacePackageTasks = append(replacePackageTasks, tb.Build()) } tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } b, err := m.sshTaskBuilder(name, topo, base.User, opt) if err != nil { return err } t := b.Parallel(false, replacePackageTasks...). Func("UpgradeCluster", func(ctx context.Context) error { if offline { return nil } // TBD: should patch be treated as an upgrade? return operator.Upgrade(ctx, topo, opt, tlsCfg, base.Version, base.Version, nil) }). Build() ctx := ctxt.New( context.Background(), opt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return perrs.Trace(err) } if overwrite { if err := overwritePatch(m.specManager, name, insts[0].ComponentName(), packagePath); err != nil { return err } } // mark instance as patched in meta topo.IterInstance(func(ins spec.Instance) { for _, pachedIns := range insts { if ins.ID() == pachedIns.ID() { ins.SetPatched(true) break } } }) return m.specManager.SaveMeta(name, metadata) } func checkPackage(specManager *spec.SpecManager, name string, inst spec.Instance, nodeOS, arch, packagePath string) error { metadata := specManager.NewMetadata() if err := specManager.Metadata(name, metadata); err != nil { return err } ver := inst.CalculateVersion(metadata.GetBaseMeta().Version) repo, err := clusterutil.NewRepository(nodeOS, arch) if err != nil { return err } entry, err := repo.ComponentBinEntry(inst.ComponentSource(), ver) if err != nil { return err } checksum, err := utils.Checksum(packagePath) if err != nil { return err } cacheDir := specManager.Path(name, "cache", inst.ComponentSource()+"-"+checksum[:7]) if err := utils.MkdirAll(cacheDir, 0755); err != nil { return perrs.Annotatef(err, "create cache directory %s", cacheDir) } if err := exec.Command("tar", "-xvf", packagePath, "-C", cacheDir).Run(); err != nil { return perrs.Annotatef(err, "decompress %s", packagePath) } fi, err := os.Stat(path.Join(cacheDir, entry)) if err != nil { if os.IsNotExist(err) { return perrs.Errorf("entry %s not found in package %s", entry, packagePath) } return perrs.AddStack(err) } if !fi.Mode().IsRegular() { return perrs.Errorf("entry %s in package %s is not a regular file", entry, packagePath) } if fi.Mode()&0500 != 0500 { return perrs.Errorf("entry %s in package %s is not executable", entry, packagePath) } return nil } func overwritePatch(specManager *spec.SpecManager, name, comp, packagePath string) error { if err := utils.MkdirAll(specManager.Path(name, spec.PatchDirName), 0755); err != nil { return err } checksum, err := utils.Checksum(packagePath) if err != nil { return 
err } tg := specManager.Path(name, spec.PatchDirName, comp+"-"+checksum[:7]+".tar.gz") if !utils.IsExist(tg) { if err := utils.Copy(packagePath, tg); err != nil { return err } } symlink := specManager.Path(name, spec.PatchDirName, comp+".tar.gz") if utils.IsSymExist(symlink) { os.Remove(symlink) } tgRelPath, err := filepath.Rel(filepath.Dir(symlink), tg) if err != nil { return err } return os.Symlink(tgRelPath, symlink) } func instancesToPatch(topo spec.Topology, options operator.Options) ([]spec.Instance, error) { roleFilter := set.NewStringSet(options.Roles...) nodeFilter := set.NewStringSet(options.Nodes...) components := topo.ComponentsByStartOrder() components = operator.FilterComponent(components, roleFilter) instances := []spec.Instance{} comps := []string{} for _, com := range components { insts := operator.FilterInstance(com.Instances(), nodeFilter) if len(insts) > 0 { comps = append(comps, com.Name()) } instances = append(instances, insts...) } if len(comps) > 1 { return nil, fmt.Errorf("can't patch more than one component at once: %v", comps) } if len(instances) == 0 { return nil, fmt.Errorf("no instance found on specified role(%v) and nodes(%v)", options.Roles, options.Nodes) } return instances, nil } tiup-1.16.3/pkg/cluster/manager/reload.go000066400000000000000000000101051505422223000202240ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "context" "fmt" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/tui" ) // Reload the cluster. func (m *Manager) Reload(name string, gOpt operator.Options, skipRestart, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { return err } sshTimeout := gOpt.SSHTimeout exeTimeout := gOpt.OptTimeout metadata, err := m.meta(name) if err != nil { return err } var sshProxyProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} if gOpt.SSHType != executor.SSHTypeNone && len(gOpt.SSHProxyHost) != 0 { var err error if sshProxyProps, err = tui.ReadIdentityFileOrPassword(gOpt.SSHProxyIdentity, gOpt.SSHProxyUsePassword); err != nil { return err } } if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("Will reload the cluster %s with restart policy is %s, nodes: %s, roles: %s.\nDo you want to continue? 
[y/N]:", color.HiYellowString(name), color.HiRedString(fmt.Sprintf("%v", !skipRestart)), color.HiRedString(strings.Join(gOpt.Nodes, ",")), color.HiRedString(strings.Join(gOpt.Roles, ",")), ), ); err != nil { return err } } topo := metadata.GetTopology() base := metadata.GetBaseMeta() // monitor uniqueHosts, noAgentHosts := getMonitorHosts(topo) // init config refreshConfigTasks, hasImported := buildInitConfigTasks(m, name, topo, base, gOpt, nil) // handle dir scheme changes if hasImported { if err := spec.HandleImportPathMigration(name); err != nil { return err } } monitorConfigTasks := buildInitMonitoredConfigTasks( m.specManager, name, uniqueHosts, noAgentHosts, *topo.BaseTopo().GlobalOptions, topo.GetMonitoredOptions(), m.logger, sshTimeout, exeTimeout, gOpt, sshProxyProps, ) b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } if topo.Type() == spec.TopoTypeTiDB && !skipRestart { b.UpdateTopology( name, m.specManager.Path(name), metadata.(*spec.ClusterMeta), nil, /* deleteNodeIds */ ) } b.ParallelStep("+ Refresh instance configs", gOpt.Force, refreshConfigTasks...) if len(monitorConfigTasks) > 0 { b.ParallelStep("+ Refresh monitor configs", gOpt.Force, monitorConfigTasks...) 
} // Save the updated topology back to file after configs are refreshed // This ensures any modifications made during InitConfig (like handleRemoteWrite) are persisted b.Func("Save updated topology", func(ctx context.Context) error { // Save metadata back to file return m.specManager.SaveMeta(name, metadata) }) if !skipRestart { tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } b.Func("Upgrade Cluster", func(ctx context.Context) error { return operator.Upgrade(ctx, topo, gOpt, tlsCfg, base.Version, base.Version, nil) }) } t := b.Build() ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } m.logger.Infof("Reloaded cluster `%s` successfully", name) return nil } tiup-1.16.3/pkg/cluster/manager/rename.go000066400000000000000000000042741505422223000202370ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "fmt" "os" "github.com/fatih/color" "github.com/pingcap/tiup/pkg/cluster/clusterutil" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // Rename the cluster func (m *Manager) Rename(name string, opt operator.Options, newName string, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } if !utils.IsExist(m.specManager.Path(name)) { return errorRenameNameNotExist. New("Cluster name '%s' not exist", name). WithProperty(tui.SuggestionFromFormat("Please double check your cluster name")) } if err := clusterutil.ValidateClusterNameOrError(newName); err != nil { return err } if utils.IsExist(m.specManager.Path(newName)) { return errorRenameNameDuplicate. New("Cluster name '%s' is duplicated", newName). WithProperty(tui.SuggestionFromFormat("Please specify another cluster name")) } if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("Will rename the cluster name from %s to %s.\nDo you confirm this action? [y/N]:", color.HiYellowString(name), color.HiYellowString(newName)), ); err != nil { return err } } _, err := m.meta(name) if err != nil { // refuse renaming if current cluster topology is not valid return err } if err := os.Rename(m.specManager.Path(name), m.specManager.Path(newName)); err != nil { return err } m.logger.Infof("Rename cluster `%s` -> `%s` successfully", name, newName) opt.Roles = []string{spec.ComponentGrafana, spec.ComponentPrometheus} return m.Reload(newName, opt, false, skipConfirm) } tiup-1.16.3/pkg/cluster/manager/rotate_ssh.go000066400000000000000000000057331505422223000211440ustar00rootroot00000000000000// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "errors" "fmt" "os" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tui" ) // RotateSSH rotate public keys of target nodes func (m *Manager) RotateSSH(name string, gOpt operator.Options, skipConfirm bool) error { metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() if !skipConfirm { if err := tui.PromptForConfirmOrAbortError( "This operation will rotate ssh keys for user '%s' .\nDo you want to continue? [y/N]:", base.User); err != nil { return err } } var rotateSSHTasks []*task.StepDisplay // tasks which are used to initialize environment uniqueHosts, _ := getMonitorHosts(topo) for host, hostInfo := range uniqueHosts { t, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t = t.RotateSSH(host, base.User, m.specManager.Path(name, "ssh", "new.pub")) rotateSSHTasks = append(rotateSSHTasks, t.BuildAsStep(fmt.Sprintf(" - Rotate ssh key on %s:%d", host, hostInfo.ssh))) } builder := task.NewBuilder(m.logger). Step("+ Generate new SSH keys", task.NewBuilder(m.logger). SSHKeyGen(m.specManager.Path(name, "ssh", "new")). Build(), m.logger). ParallelStep("+ rotate ssh keys of target host environments", false, rotateSSHTasks...). 
Step("+ overwrite old SSH keys", task.NewBuilder(m.logger). Func("rename", func(ctx context.Context) error { err := os.Rename(m.specManager.Path(name, "ssh", "new.pub"), m.specManager.Path(name, "ssh", "id_rsa.pub")) if err != nil { return err } err = os.Rename(m.specManager.Path(name, "ssh", "new"), m.specManager.Path(name, "ssh", "id_rsa")) if err != nil { return err } return nil }). Build(), m.logger) ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := builder.Build().Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return err } m.logger.Infof("ssh keys are successfully updated") return nil } tiup-1.16.3/pkg/cluster/manager/scale_in.go000066400000000000000000000132311505422223000205360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "crypto/tls" "errors" "fmt" "strings" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" ) // ScaleIn the cluster. 
func (m *Manager) ScaleIn( name string, skipConfirm bool, gOpt operator.Options, scale func(builder *task.Builder, metadata spec.Metadata, tlsCfg *tls.Config), ) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { return err } var ( force bool = gOpt.Force nodes []string = gOpt.Nodes ) metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrMultipleTiSparkMaster) && !errors.Is(perrs.Cause(err), spec.ErrMultipleTisparkWorker) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { // ignore conflict check error, node may be deployed by former version // that lack of some certain conflict checks return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() if !skipConfirm { if force { m.logger.Warnf("%s", color.HiRedString(tui.ASCIIArtWarning)) if err := tui.PromptForAnswerOrAbortError( "Yes, I know my data might be lost.", "%s", color.HiRedString("Forcing scale in is unsafe and may result in data loss for stateful components.\n"+ "DO NOT use `--force` if you have any component in ")+ color.YellowString("Pending Offline")+color.HiRedString(" status.\n")+ color.HiRedString("The process is irreversible and could NOT be cancelled.\n")+ "Only use `--force` when some of the servers are already permanently offline.\n"+ "Are you sure to continue?", ); err != nil { return err } } if err := tui.PromptForConfirmOrAbortError( "This operation will delete the %s nodes in `%s` and all their data.\nDo you want to continue? 
[y/N]:", strings.Join(nodes, ","), color.HiYellowString(name)); err != nil { return err } if err := checkAsyncComps(topo, nodes); err != nil { return err } m.logger.Infof("Scale-in nodes...") } // Regenerate configuration gOpt.IgnoreConfigCheck = true tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } scale(b, metadata, tlsCfg) ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := b.Build().Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } // get new metadata metadata, err = m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) && !errors.Is(perrs.Cause(err), spec.ErrMultipleTiSparkMaster) && !errors.Is(perrs.Cause(err), spec.ErrMultipleTisparkWorker) && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { // ignore conflict check error, node may be deployed by former version // that lack of some certain conflict checks return err } topo = metadata.GetTopology() base = metadata.GetBaseMeta() regenConfigTasks, hasImported := buildInitConfigTasks(m, name, topo, base, gOpt, nodes) // handle dir scheme changes if hasImported { if err := spec.HandleImportPathMigration(name); err != nil { return err } } b, err = m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t := b. ParallelStep("+ Refresh instance configs", force, regenConfigTasks...). ParallelStep("+ Reload prometheus and grafana", gOpt.Force, buildReloadPromAndGrafanaTasks(metadata.GetTopology(), m.logger, gOpt, nodes...)...). Build() if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. 
return err } return perrs.Trace(err) } m.logger.Infof("Scaled cluster `%s` in successfully", name) return nil } // checkAsyncComps func checkAsyncComps(topo spec.Topology, nodes []string) error { var asyncOfflineComps = set.NewStringSet(spec.ComponentPump, spec.ComponentTiKV, spec.ComponentTiFlash, spec.ComponentDrainer) deletedNodes := set.NewStringSet(nodes...) delAsyncOfflineComps := set.NewStringSet() topo.IterInstance(func(instance spec.Instance) { if deletedNodes.Exist(instance.ID()) { if asyncOfflineComps.Exist(instance.ComponentName()) { delAsyncOfflineComps.Insert(instance.ComponentName()) } } }) if len(delAsyncOfflineComps.Slice()) > 0 { return tui.PromptForConfirmOrAbortError("%s", fmt.Sprintf( "%s\nDo you want to continue? [y/N]:", color.YellowString( "The component `%s` will become tombstone, maybe exists in several minutes or hours, after that you can use the prune command to clean it", delAsyncOfflineComps.Slice()))) } return nil } tiup-1.16.3/pkg/cluster/manager/scale_out.go000066400000000000000000000237201505422223000207430ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "context" "errors" "fmt" "time" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "gopkg.in/yaml.v3" ) // ScaleOut scale out the cluster. func (m *Manager) ScaleOut( name string, topoFile string, afterDeploy func(b *task.Builder, newPart spec.Topology, gOpt operator.Options), final func(b *task.Builder, name string, meta spec.Metadata, gOpt operator.Options), opt DeployOptions, skipConfirm bool, gOpt operator.Options, ) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } // check the scale out file lock is exist err := checkScaleOutLock(m, name, opt, skipConfirm) if err != nil { return err } metadata, err := m.meta(name) // allow specific validation errors so that user can recover a broken // cluster if it is somehow in a bad state. if err != nil && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() // Inherit existing global configuration. We must assign the inherited values before unmarshalling // because some default value rely on the global options and monitored options. 
newPart := topo.NewPart() // if stage2 is true, the new part data store in scale-out file lock if opt.Stage2 { // Acquire the Scale-out file lock newPart, err = m.specManager.ScaleOutLock(name) if err != nil { return err } } else { // if stage2 is true, not need check topology or other // check for the input topology to let user confirm if there're any // global configs set if err := checkForGlobalConfigs(m.logger, topoFile, skipConfirm); err != nil { return err } // The no tispark master error is ignored, as if the tispark master is removed from the topology // file for some reason (manual edit, for example), it is still possible to scale-out it to make // the whole topology back to normal state. if err := spec.ParseTopologyYaml(topoFile, newPart, true); err != nil && !errors.Is(perrs.Cause(err), spec.ErrNoTiSparkMaster) { return err } if err := checkTiFlashWithTLS(topo, base.Version); err != nil { return err } if newPartTopo, ok := newPart.(*spec.Specification); ok { newPartTopo.AdjustByVersion(base.Version) } if err := validateNewTopo(newPart); err != nil { return err } } var ( sshConnProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} sshProxyProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} ) if gOpt.SSHType != executor.SSHTypeNone { var err error if sshConnProps, err = tui.ReadIdentityFileOrPassword(opt.IdentityFile, opt.UsePassword); err != nil { return err } if len(gOpt.SSHProxyHost) != 0 { if sshProxyProps, err = tui.ReadIdentityFileOrPassword(gOpt.SSHProxyIdentity, gOpt.SSHProxyUsePassword); err != nil { return err } } } var sudo bool if topo.BaseTopo().GlobalOptions.SystemdMode == spec.UserMode { sudo = false hint := fmt.Sprintf("loginctl enable-linger %s", opt.User) msg := "The value of systemd_mode is set to `user` in the topology, please note that you'll need to manually execute the following command using root or sudo on the host(s) to enable lingering for the systemd user instance.\n" msg += color.GreenString(hint) msg += "\nYou can 
read the systemd documentation for reference: https://wiki.archlinux.org/title/Systemd/User#Automatic_start-up_of_systemd_user_instances." m.logger.Warnf("%s", msg) err = tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]: ") if err != nil { return err } } else { sudo = opt.User != "root" } if err := m.fillHost(sshConnProps, sshProxyProps, newPart, &gOpt, opt.User, sudo); err != nil { return err } var mergedTopo spec.Topology // in satge2, not need mergedTopo if opt.Stage2 { mergedTopo = topo } else { // Abort scale out operation if the merged topology is invalid mergedTopo = topo.MergeTopo(newPart) if err := mergedTopo.Validate(); err != nil { return err } spec.ExpandRelativeDir(mergedTopo) if topo, ok := mergedTopo.(*spec.Specification); ok { // Check if TiKV's label set correctly if !opt.NoLabels { pdList := topo.BaseTopo().MasterList tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } pdClient := api.NewPDClient( context.WithValue(context.TODO(), logprinter.ContextKeyLogger, m.logger), pdList, 10*time.Second, tlsCfg, ) lbs, placementRule, err := pdClient.GetLocationLabels() if err != nil { return err } if !placementRule { if err := spec.CheckTiKVLabels(lbs, mergedTopo.(*spec.Specification)); err != nil { return perrs.Errorf("check TiKV label failed, please fix that before continue:\n%s", err) } } } } if err := checkConflict(m, name, mergedTopo); err != nil { return err } } patchedComponents := set.NewStringSet() // if stage2 is true, this check is not work newPart.IterInstance(func(instance spec.Instance) { if utils.IsExist(m.specManager.Path(name, spec.PatchDirName, instance.ComponentName()+".tar.gz")) { patchedComponents.Insert(instance.ComponentName()) instance.SetPatched(true) } }) if !skipConfirm { // patchedComponents are components that have been patched and overwrited if err := m.confirmTopology(name, base.Version, newPart, patchedComponents); err != nil { return err } } // Build the 
scale out tasks t, err := buildScaleOutTask( m, name, metadata, mergedTopo, opt, sshConnProps, sshProxyProps, newPart, patchedComponents, gOpt, afterDeploy, final) if err != nil { return err } ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) ctx = context.WithValue(ctx, ctxt.CtxBaseTopo, topo) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } if opt.Stage1 { m.logger.Infof(`The new instance is not started! You need to execute '%s' to start the new instance.`, color.YellowString("tiup cluster scale-out %s --stage2", name)) } m.logger.Infof("Scaled cluster `%s` out successfully", color.YellowString(name)) return nil } // validateNewTopo checks the new part of scale-out topology to make sure it's supported func validateNewTopo(topo spec.Topology) (err error) { topo.IterInstance(func(instance spec.Instance) { // check for "imported" parameter, it can not be true when scaling out if instance.IsImported() { err = errors.New( "'imported' is set to 'true' for new instance, this is only used " + "for instances imported from tidb-ansible and make no sense when " + "scaling out, please delete the line or set it to 'false' for new instances") return } }) return err } // checkForGlobalConfigs checks the input scale out topology to make sure users are aware // of the global config fields in it will be ignored. func checkForGlobalConfigs(logger *logprinter.Logger, topoFile string, skipConfirm bool) error { yamlFile, err := spec.ReadYamlFile(topoFile) if err != nil { return err } var newPart map[string]any if err := yaml.Unmarshal(yamlFile, &newPart); err != nil { return err } // user confirmed, skip checks for k := range newPart { switch k { case "global", "monitored", "server_configs": logger.Warnf(`You have one or more of %s fields configured in the scale out topology, but they will be ignored during the scaling out process. 
If you want to use configs different from the existing cluster, cancel now and set them in the specification fields for each host.`, color.YellowString(`["global", "monitored", "server_configs"]`)) if !skipConfirm { if err := tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]: "); err != nil { return err } } return nil } } return nil } // checkEnvWithStage1 check environment in scale-out stage 1 func checkScaleOutLock(m *Manager, name string, opt DeployOptions, skipConfirm bool) error { locked, _ := m.specManager.IsScaleOutLocked(name) if (!opt.Stage1 && !opt.Stage2) && locked { return m.specManager.ScaleOutLockedErr(name) } if opt.Stage1 { if locked { return m.specManager.ScaleOutLockedErr(name) } m.logger.Warnf(`The parameter '%s' is set, new instance will not be started Please manually execute '%s' to finish the process.`, color.YellowString("--stage1"), color.YellowString("tiup cluster scale-out %s --stage2", name)) if !skipConfirm { if err := tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]: "); err != nil { return err } } } if opt.Stage2 { if !locked { return fmt.Errorf("The scale-out file lock does not exist, please make sure to run 'tiup-cluster scale-out %s --stage1' first", name) } m.logger.Warnf(`The parameter '%s' is set, only start the new instances and reload configs.`, color.YellowString("--stage2")) if !skipConfirm { if err := tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]: "); err != nil { return err } } } return nil } tiup-1.16.3/pkg/cluster/manager/show_config.go000066400000000000000000000022011505422223000212610ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "errors" "fmt" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/meta" "gopkg.in/yaml.v3" ) // ShowConfig shows the cluster's config. func (m *Manager) ShowConfig(name string) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } metadata, err := m.meta(name) if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) { return err } topo := metadata.GetTopology() data, err := yaml.Marshal(topo) if err != nil { return perrs.AddStack(err) } fmt.Print(string(data)) return nil } tiup-1.16.3/pkg/cluster/manager/tls.go000066400000000000000000000124361505422223000175710ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "context" "fmt" "os" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" ) // TLS set cluster enable/disable encrypt communication by tls func (m *Manager) TLS(name string, gOpt operator.Options, enable, cleanCertificate, reloadCertificate, skipConfirm bool) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } // check locked if err := m.specManager.ScaleOutLockedErr(name); err != nil { return err } metadata, err := m.meta(name) if err != nil { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() // set tls_enabled globalOptions := topo.BaseTopo().GlobalOptions // if force is true, skip this check if globalOptions.TLSEnabled == enable && !gOpt.Force { if enable { m.logger.Infof("cluster `%s` TLS status is already enabled\n", name) } else { m.logger.Infof("cluster `%s` TLS status is already disabled\n", name) } return nil } globalOptions.TLSEnabled = enable if err := checkTLSEnv(topo, name, base.Version, skipConfirm); err != nil { return err } var ( sshProxyProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} ) if gOpt.SSHType != executor.SSHTypeNone { var err error if len(gOpt.SSHProxyHost) != 0 { if sshProxyProps, err = tui.ReadIdentityFileOrPassword(gOpt.SSHProxyIdentity, gOpt.SSHProxyUsePassword); err != nil { return err } } } // delFileMap: files that need to be cleaned up, if flag -- cleanCertificate are used delFileMap, err := getTLSFileMap(m, name, topo, enable, cleanCertificate, skipConfirm) if err != nil { return err } // Build the tls tasks t, err := buildTLSTask( m, name, metadata, gOpt, reloadCertificate, sshProxyProps, 
delFileMap) if err != nil { return err } ctx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } if err := m.specManager.SaveMeta(name, metadata); err != nil { return err } if !enable { // the cleanCertificate parameter will only take effect when enable is false if cleanCertificate { os.RemoveAll(m.specManager.Path(name, spec.TLSCertKeyDir)) } m.logger.Infof("\tCleanup localhost tls file success") } if enable { m.logger.Infof("Enabled TLS between TiDB components for cluster `%s` successfully", name) } else { m.logger.Infof("Disabled TLS between TiDB components for cluster `%s` successfully", name) } return nil } // checkTLSEnv check tiflash vserson and show confirm func checkTLSEnv(topo spec.Topology, clusterName, version string, skipConfirm bool) error { // check tiflash version if err := checkTiFlashWithTLS(topo, version); err != nil { return err } if clusterSpec, ok := topo.(*spec.Specification); ok { if len(clusterSpec.PDServers) != 1 { return errorx.EnsureStackTrace(fmt.Errorf("Having multiple PD nodes is not supported when enable/disable TLS")). WithProperty(tui.SuggestionFromString("Please `scale-in` PD nodes to one and try again.")) } } if err := topo.Validate(); err != nil { return err } if !skipConfirm { return tui.PromptForConfirmOrAbortError( "%s", fmt.Sprintf("Enable/Disable TLS will %s the cluster `%s`\nDo you want to continue? 
[y/N]:", color.HiYellowString("stop and restart"), color.HiYellowString(clusterName), )) } return nil } // getTLSFileMap func getTLSFileMap(m *Manager, clusterName string, topo spec.Topology, enableTLS, cleanCertificate, skipConfirm bool) (map[string]set.StringSet, error) { delFileMap := make(map[string]set.StringSet) if !enableTLS && cleanCertificate { // get: host: set(tlsdir) delFileMap = getCleanupFiles(topo, false, false, cleanCertificate, false, []string{}, []string{}) // build file list string delFileList := fmt.Sprintf("\n%s:\n %s", color.CyanString("localhost"), m.specManager.Path(clusterName, spec.TLSCertKeyDir)) for host, fileList := range delFileMap { delFileList += fmt.Sprintf("\n%s:", color.CyanString(host)) for _, dfp := range fileList.Slice() { delFileList += fmt.Sprintf("\n %s", dfp) } } m.logger.Warnf("The parameter `%s` will delete the following files: %s", color.YellowString("--clean-certificate"), delFileList) if !skipConfirm { if err := tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]:"); err != nil { return delFileMap, err } } } return delFileMap, nil } tiup-1.16.3/pkg/cluster/manager/transfer.go000066400000000000000000000116621505422223000206130ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "bytes" "context" "fmt" "html/template" "reflect" "strings" "github.com/google/uuid" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/set" "go.uber.org/zap" ) // TransferOptions for exec shell commanm. type TransferOptions struct { Local string Remote string Pull bool // default to push Limit int // rate limit in Kbit/s Compress bool // enable compress } // Transfer copies files from or to host in the tidb cluster. func (m *Manager) Transfer(name string, opt TransferOptions, gOpt operator.Options) error { if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } metadata, err := m.meta(name) if err != nil { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() filterRoles := set.NewStringSet(gOpt.Roles...) filterNodes := set.NewStringSet(gOpt.Nodes...) var shellTasks []task.Task uniqueHosts := map[string]set.StringSet{} // host-sshPort -> {remote-path} topo.IterInstance(func(inst spec.Instance) { key := fmt.Sprintf("%d-%s", inst.GetSSHPort(), inst.GetManageHost()) if _, found := uniqueHosts[key]; !found { if len(gOpt.Roles) > 0 && !filterRoles.Exist(inst.Role()) { return } if len(gOpt.Nodes) > 0 && (!filterNodes.Exist(inst.GetHost()) || !filterNodes.Exist(inst.GetManageHost())) { return } // render remote path instPath := opt.Remote paths, err := renderInstanceSpec(instPath, inst) if err != nil { m.logger.Debugf("error rendering remote path with spec: %s", err) return // skip } pathSet := set.NewStringSet(paths...) 
if _, ok := uniqueHosts[key]; ok { uniqueHosts[key].Join(pathSet) return } uniqueHosts[key] = pathSet } }) srcPath := opt.Local for hostKey, i := range uniqueHosts { host := hostKey[len(strings.Split(hostKey, "-")[0])+1:] for _, p := range i.Slice() { t := task.NewBuilder(m.logger) if opt.Pull { t.CopyFile(p, srcPath, host, opt.Pull, opt.Limit, opt.Compress) } else { t.CopyFile(srcPath, p, host, opt.Pull, opt.Limit, opt.Compress) } shellTasks = append(shellTasks, t.Build()) } } b, err := m.sshTaskBuilder(name, topo, base.User, gOpt) if err != nil { return err } t := b. Parallel(false, shellTasks...). Build() execCtx := ctxt.New( context.Background(), gOpt.Concurrency, m.logger, ) if err := t.Execute(execCtx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } return nil } func renderInstanceSpec(t string, inst spec.Instance) ([]string, error) { result := make([]string, 0) switch inst.ComponentName() { case spec.ComponentTiFlash: for d := range strings.SplitSeq(inst.DataDir(), ",") { tfs, ok := inst.(*spec.TiFlashInstance).InstanceSpec.(*spec.TiFlashSpec) if !ok { return result, perrs.Errorf("instance type mismatch for %v", inst) } tfs.DataDir = d key := inst.ID() + d + uuid.New().String() s, err := renderSpec(t, inst.(*spec.TiFlashInstance), key) if err != nil { zap.L().Debug("error rendering tiflash spec", zap.Error(err)) } result = append(result, s) } default: s, err := renderSpec(t, inst, inst.ID()) if err != nil { return result, perrs.Errorf("error rendering path for instance %v", inst) } result = append(result, s) } return result, nil } func renderSpec(t string, s any, id string) (string, error) { // Only apply on *spec.TiDBInstance and *spec.PDInstance etc. 
if v := reflect.ValueOf(s); v.Kind() == reflect.Ptr { if v = v.Elem(); !v.IsValid() { return "", perrs.Errorf("invalid spec") } if v = v.FieldByName("BaseInstance"); !v.IsValid() { return "", perrs.Errorf("field BaseInstance not found") } if v = v.FieldByName("InstanceSpec"); !v.IsValid() { return "", perrs.Errorf("field InstanceSpec not found") } s = v.Interface() } tpl, err := template.New(id).Option("missingkey=error").Parse(t) if err != nil { return "", err } result := bytes.NewBufferString("") if err := tpl.Execute(result, s); err != nil { zap.L().Debug("missing key when parsing: %s", zap.Error(err)) return "", err } return result.String(), nil } tiup-1.16.3/pkg/cluster/manager/transfer_test.go000066400000000000000000000045171505422223000216530ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package manager import ( "testing" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/stretchr/testify/assert" ) func TestRenderSpec(t *testing.T) { var s spec.Instance = &spec.TiDBInstance{BaseInstance: spec.BaseInstance{ InstanceSpec: &spec.TiDBSpec{ Host: "172.16.5.140", SSHPort: 22, Imported: false, Port: 4000, StatusPort: 10080, DeployDir: "/home/test/deploy/tidb-4000", Arch: "amd64", OS: "linux", }, }} dir, err := renderSpec("{{.DataDir}}", s, "test-tidb") assert.NotNil(t, err) assert.Empty(t, dir) s = &spec.PDInstance{BaseInstance: spec.BaseInstance{ InstanceSpec: &spec.PDSpec{ Host: "172.16.5.140", SSHPort: 22, Imported: false, Name: "pd-1", ClientPort: 2379, PeerPort: 2380, DeployDir: "/home/test/deploy/pd-2379", DataDir: "/home/test/deploy/pd-2379/data", }, }} // s.BaseInstance.InstanceSpec dir, err = renderSpec("{{.DataDir}}", s, "test-pd") assert.Nil(t, err) assert.NotEmpty(t, dir) s = &spec.TSOInstance{BaseInstance: spec.BaseInstance{ InstanceSpec: &spec.TSOSpec{ Host: "172.16.5.140", SSHPort: 22, Name: "tso-1", DeployDir: "/home/test/deploy/tso-3379", DataDir: "/home/test/deploy/tso-3379/data", }, }} // s.BaseInstance.InstanceSpec dir, err = renderSpec("{{.DataDir}}", s, "test-tso") assert.Nil(t, err) assert.NotEmpty(t, dir) s = &spec.SchedulingInstance{BaseInstance: spec.BaseInstance{ InstanceSpec: &spec.SchedulingSpec{ Host: "172.16.5.140", SSHPort: 22, Name: "scheduling-1", DeployDir: "/home/test/deploy/scheduling-3379", DataDir: "/home/test/deploy/scheduling-3379/data", }, }} // s.BaseInstance.InstanceSpec dir, err = renderSpec("{{.DataDir}}", s, "test-scheduling") assert.Nil(t, err) assert.NotEmpty(t, dir) } tiup-1.16.3/pkg/cluster/manager/upgrade.go000066400000000000000000000271031505422223000204130ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package manager import ( "context" "fmt" "os" "strings" "time" "github.com/fatih/color" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/task" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/mod/semver" ) func (m *Manager) upgradePrecheck(name string, componentVersions map[string]string, opt operator.Options, skipConfirm bool) error { if !skipConfirm && strings.ToLower(opt.DisplayMode) != "json" { for _, v := range componentVersions { if v != "" { m.logger.Warnf("%s", color.YellowString("tiup-cluster does not provide compatibility guarantees or version checks for different component versions. Please be aware of the risks or use it with the assistance of PingCAP support.")) err := tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]: ") if err != nil { return err } break } } } if err := clusterutil.ValidateClusterNameOrError(name); err != nil { return err } // check locked return m.specManager.ScaleOutLockedErr(name) } // Upgrade the cluster. 
func (m *Manager) Upgrade(name string, clusterVersion string, componentVersions map[string]string, opt operator.Options, skipConfirm, offline, ignoreVersionCheck bool, restartTimeout time.Duration) error { err := m.upgradePrecheck(name, componentVersions, opt, skipConfirm) if err != nil { return err } metadata, err := m.meta(name) if err != nil { return err } topo := metadata.GetTopology() base := metadata.GetBaseMeta() // Adjust topo by new version if clusterTopo, ok := topo.(*spec.Specification); ok { clusterTopo.AdjustByVersion(clusterVersion) } var ( downloadCompTasks []task.Task // tasks which are used to download components copyCompTasks []task.Task // tasks which are used to copy components to remote host uniqueComps = map[string]struct{}{} ) if err := versionCompare(base.Version, clusterVersion); err != nil { if !ignoreVersionCheck { return err } m.logger.Warnf("%s", color.RedString("There is no guarantee that the cluster can be downgraded. Be careful before you continue.")) } compVersionMsg := "" restartComponents := []string{} components := topo.ComponentsByUpdateOrder(base.Version) for _, comp := range components { // if component version is not specified, use the cluster version or latest("") oldver := comp.CalculateVersion(base.Version) version := componentVersions[comp.Name()] if version != "" { comp.SetVersion(version) } calver := comp.CalculateVersion(clusterVersion) if comp.Name() != spec.ComponentTiProxy || calver != oldver { restartComponents = append(restartComponents, comp.Name(), comp.Role()) if len(comp.Instances()) > 0 { compVersionMsg += fmt.Sprintf("\nwill upgrade and restart component \"%19s\" to \"%s\",", comp.Name(), calver) } } } components = operator.FilterComponent(components, set.NewStringSet(restartComponents...)) monitoredOptions := topo.GetMonitoredOptions() if monitoredOptions != nil { if componentVersions[spec.ComponentBlackboxExporter] != "" { monitoredOptions.BlackboxExporterVersion = 
componentVersions[spec.ComponentBlackboxExporter] } if componentVersions[spec.ComponentNodeExporter] != "" { monitoredOptions.NodeExporterVersion = componentVersions[spec.ComponentNodeExporter] } compVersionMsg += fmt.Sprintf("\nwill upgrade component %19s to \"%s\",", "\"node-exporter\"", monitoredOptions.NodeExporterVersion) compVersionMsg += fmt.Sprintf("\nwill upgrade component %19s to \"%s\".", "\"blackbox-exporter\"", monitoredOptions.BlackboxExporterVersion) } m.logger.Warnf(`%s This operation will upgrade %s %s cluster %s (with a concurrency of %d) to %s:%s`, color.YellowString("Before the upgrade, it is recommended to read the upgrade guide at https://docs.pingcap.com/tidb/stable/upgrade-tidb-using-tiup and finish the preparation steps."), m.sysName, color.HiYellowString(base.Version), color.HiYellowString(name), opt.Concurrency, color.HiYellowString(clusterVersion), compVersionMsg) if !skipConfirm { if err := tui.PromptForConfirmOrAbortError(`Do you want to continue? [y/N]:`); err != nil { return err } m.logger.Infof("Upgrading cluster...") } hasImported := false for _, comp := range components { version := comp.CalculateVersion(clusterVersion) for _, inst := range comp.Instances() { // Download component from repository key := fmt.Sprintf("%s-%s-%s-%s", inst.ComponentSource(), version, inst.OS(), inst.Arch()) if _, found := uniqueComps[key]; !found { uniqueComps[key] = struct{}{} t := task.NewBuilder(m.logger). Download(inst.ComponentSource(), inst.OS(), inst.Arch(), version). 
Build() downloadCompTasks = append(downloadCompTasks, t) } deployDir := spec.Abs(base.User, inst.DeployDir()) // data dir would be empty for components which don't need it dataDirs := spec.MultiDirAbs(base.User, inst.DataDir()) // log dir will always be with values, but might not used by the component logDir := spec.Abs(base.User, inst.LogDir()) // Deploy component tb := task.NewBuilder(m.logger) // for some component, dataDirs might need to be created due to upgrade // eg: TiCDC support DataDir since v4.0.13 tb = tb.Mkdir(topo.BaseTopo().GlobalOptions.User, inst.GetManageHost(), topo.BaseTopo().GlobalOptions.SystemdMode != spec.UserMode, dataDirs...) if inst.IsImported() { switch inst.ComponentName() { case spec.ComponentPrometheus, spec.ComponentGrafana, spec.ComponentAlertmanager: tb.CopyComponent( inst.ComponentSource(), inst.OS(), inst.Arch(), version, "", // use default srcPath inst.GetManageHost(), deployDir, ) } hasImported = true } // backup files of the old version tb = tb.BackupComponent(inst.ComponentSource(), base.Version, inst.GetManageHost(), deployDir) // this interface is not used if deployerInstance, ok := inst.(DeployerInstance); ok { deployerInstance.Deploy(tb, "", deployDir, version, name, clusterVersion) } else { // copy dependency component if needed switch inst.ComponentName() { case spec.ComponentTiSpark: env := environment.GlobalEnv() sparkVer, _, err := env.V1Repository().WithOptions(repository.Options{ GOOS: inst.OS(), GOARCH: inst.Arch(), }).LatestStableVersion(spec.ComponentSpark, false) if err != nil { return err } tb = tb.DeploySpark(inst, sparkVer.String(), "" /* default srcPath */, deployDir) default: tb = tb.CopyComponent( inst.ComponentSource(), inst.OS(), inst.Arch(), version, "", // use default srcPath inst.GetManageHost(), deployDir, ) } } tb.InitConfig( name, clusterVersion, m.specManager, inst, base.User, opt.IgnoreConfigCheck, meta.DirPaths{ Deploy: deployDir, Data: dataDirs, Log: logDir, Cache: m.specManager.Path(name, 
spec.TempConfigPath), }, ) copyCompTasks = append(copyCompTasks, tb.Build()) } } var sshProxyProps *tui.SSHConnectionProps = &tui.SSHConnectionProps{} if opt.SSHType != executor.SSHTypeNone { var err error if len(opt.SSHProxyHost) != 0 { if sshProxyProps, err = tui.ReadIdentityFileOrPassword(opt.SSHProxyIdentity, opt.SSHProxyUsePassword); err != nil { return err } } } uniqueHosts, noAgentHosts := getMonitorHosts(topo) // Deploy monitor relevant components to remote dlTasks, dpTasks, err := buildMonitoredDeployTask( m, uniqueHosts, noAgentHosts, topo.BaseTopo().GlobalOptions, monitoredOptions, opt, sshProxyProps, ) if err != nil { return err } monitorConfigTasks := buildInitMonitoredConfigTasks( m.specManager, name, uniqueHosts, noAgentHosts, *topo.BaseTopo().GlobalOptions, monitoredOptions, m.logger, opt.SSHTimeout, opt.OptTimeout, opt, sshProxyProps, ) // handle dir scheme changes if hasImported { if err := spec.HandleImportPathMigration(name); err != nil { return err } } ctx := ctxt.New( context.Background(), opt.Concurrency, m.logger, ) tlsCfg, err := topo.TLSConfig(m.specManager.Path(name, spec.TLSCertKeyDir)) if err != nil { return err } // make sure the cluster is stopped if offline && !opt.Force { running := false topo.IterInstance(func(ins spec.Instance) { if !running { status := ins.Status(ctx, time.Duration(opt.APITimeout), tlsCfg, topo.BaseTopo().MasterList...) if strings.HasPrefix(status, "Up") || strings.HasPrefix(status, "Healthy") { running = true } } }, opt.Concurrency) if running { return perrs.Errorf("cluster is running and cannot be upgraded offline") } } b, err := m.sshTaskBuilder(name, topo, base.User, opt) if err != nil { return err } t := b. Parallel(false, downloadCompTasks...). ParallelStep("download monitored", false, dlTasks...). Parallel(opt.Force, copyCompTasks...). ParallelStep("deploy monitored", false, dpTasks...). ParallelStep("refresh monitored config", false, monitorConfigTasks...). 
Func("UpgradeCluster", func(ctx context.Context) error { if offline { return nil } nopt := opt nopt.Roles = restartComponents waitFunc := func() {} if restartTimeout.Nanoseconds() > 0 { waitFunc = func() { // A tui.PromptWithTimeout(str, time.Duration) would have been nice ch := make(chan any, 1) go func() { tui.Prompt(fmt.Sprintf("\nPress any key to continue (timeout %s)", restartTimeout)) ch <- nil }() select { case <-time.After(restartTimeout): fmt.Printf("\nTimeout, continueing\n") case <-ch: } } } return operator.Upgrade(ctx, topo, nopt, tlsCfg, base.Version, clusterVersion, waitFunc) }). Build() if err := t.Execute(ctx); err != nil { if errorx.Cast(err) != nil { // FIXME: Map possible task errors and give suggestions. return err } return perrs.Trace(err) } // clear patched packages and tags if err := os.RemoveAll(m.specManager.Path(name, "patch")); err != nil { return perrs.Trace(err) } topo.IterInstance(func(ins spec.Instance) { if ins.IsPatched() { ins.SetPatched(false) } }) metadata.SetVersion(clusterVersion) if err := m.specManager.SaveMeta(name, metadata); err != nil { return err } m.logger.Infof("Upgraded cluster `%s` successfully", name) return nil } func versionCompare(curVersion, newVersion string) error { // Can always upgrade to 'nightly' event the current version is 'nightly' if newVersion == utils.NightlyVersionAlias { return nil } switch semver.Compare(curVersion, newVersion) { case -1, 0: return nil case 1: return perrs.Errorf("please specify a higher or equle version than %s", curVersion) default: return perrs.Errorf("unreachable") } } tiup-1.16.3/pkg/cluster/module/000077500000000000000000000000001505422223000163055ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/module/module.go000066400000000000000000000011471505422223000201240ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package module import "github.com/joomcode/errorx" var ( errNS = errorx.NewNamespace("module") ) tiup-1.16.3/pkg/cluster/module/shell.go000066400000000000000000000033241505422223000177450ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package module import ( "context" "fmt" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // ShellModuleConfig is the configurations used to initialize a TiUPModuleSystemd type ShellModuleConfig struct { Command string // the command to run Sudo bool // whether use root privilege to run the command Chdir string // change working directory before running the command UseShell bool // whether use shell to invoke the command } // ShellModule is the module used to control systemd units type ShellModule struct { cmd string // the built command sudo bool } // NewShellModule builds and returns a ShellModule object base on given config. 
func NewShellModule(config ShellModuleConfig) *ShellModule { cmd := config.Command if config.Chdir != "" { cmd = fmt.Sprintf("cd %s && %s", config.Chdir, cmd) } if config.UseShell { cmd = fmt.Sprintf("%s -c '%s'", defaultShell, cmd) } return &ShellModule{ cmd: cmd, sudo: config.Sudo, } } // Execute passes the command to executor and returns its results, the executor // should be already initialized. func (mod *ShellModule) Execute(ctx context.Context, exec ctxt.Executor) ([]byte, []byte, error) { return exec.Execute(ctx, mod.cmd, mod.sudo) } tiup-1.16.3/pkg/cluster/module/systemd.go000066400000000000000000000062571505422223000203360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package module import ( "context" "fmt" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // scope can be either "system", "user" or "global" const ( SystemdScopeSystem = "system" SystemdScopeUser = "user" SystemdScopeGlobal = "global" ) // SystemdModuleConfig is the configurations used to initialize a SystemdModule type SystemdModuleConfig struct { Unit string // the name of systemd unit(s) Action string // the action to perform with the unit ReloadDaemon bool // run daemon-reload before other actions CheckActive bool // run is-active before action Scope string // user, system or global Force bool // add the `--force` arg to systemctl command Signal string // specify the signal to send to process Timeout time.Duration // timeout to execute the command SystemdMode string } // SystemdModule is the module used to control systemd units type SystemdModule struct { cmd string // the built command sudo bool // does the command need to be run as root timeout time.Duration // timeout to execute the command } // NewSystemdModule builds and returns a SystemdModule object base on // given config. 
func NewSystemdModule(config SystemdModuleConfig) *SystemdModule { systemctl := "systemctl" sudo := true if config.SystemdMode == "user" { sudo = false } if config.Force { systemctl = fmt.Sprintf("%s --force", systemctl) } if config.Signal != "" { systemctl = fmt.Sprintf("%s --signal %s", systemctl, config.Signal) } switch config.Scope { case SystemdScopeUser: sudo = false // `--user` scope does not need root privilege fallthrough case SystemdScopeGlobal: systemctl = fmt.Sprintf("%s --%s", systemctl, config.Scope) } cmd := fmt.Sprintf("%s %s %s", systemctl, strings.ToLower(config.Action), config.Unit) if config.CheckActive { cmd = fmt.Sprintf("if [[ $(%s is-active %s) == \"active\" ]]; then %s; fi", systemctl, config.Unit, cmd) } if config.ReloadDaemon { cmd = fmt.Sprintf("%s daemon-reload && %s", systemctl, cmd) } mod := &SystemdModule{ cmd: cmd, sudo: sudo, timeout: config.Timeout, } // the default TimeoutStopSec of systemd is 90s, after which it sends a SIGKILL // to remaining processes, set the default value slightly larger than it if config.Timeout == 0 { mod.timeout = time.Second * 100 } return mod } // Execute passes the command to executor and returns its results, the executor // should be already initialized. func (mod *SystemdModule) Execute(ctx context.Context, exec ctxt.Executor) ([]byte, []byte, error) { return exec.Execute(ctx, mod.cmd, mod.sudo, mod.timeout) } tiup-1.16.3/pkg/cluster/module/user.go000066400000000000000000000100371505422223000176130ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package module import ( "context" "fmt" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) const ( defaultShell = "/bin/bash" // UserActionAdd add user. UserActionAdd = "add" // UserActionDel delete user. UserActionDel = "del" // UserActionModify = "modify" // TODO: in RHEL/CentOS, the commands are in /usr/sbin, but in some // other distros they may be in other location such as /usr/bin, we'll // need to check and find the proper path of commands in the future. useraddCmd = "/usr/sbin/useradd" userdelCmd = "/usr/sbin/userdel" groupaddCmd = "/usr/sbin/groupadd" // usermodCmd = "/usr/sbin/usermod" ) var ( errNSUser = errNS.NewSubNamespace("user") // ErrUserAddFailed is ErrUserAddFailed ErrUserAddFailed = errNSUser.NewType("user_add_failed") // ErrUserDeleteFailed is ErrUserDeleteFailed ErrUserDeleteFailed = errNSUser.NewType("user_delete_failed") ) // UserModuleConfig is the configurations used to initialize a UserModule type UserModuleConfig struct { Action string // add, del or modify user Name string // username Group string // group name Home string // home directory of user Shell string // login shell of the user Sudoer bool // when true, the user will be added to sudoers list } // UserModule is the module used to control systemd units type UserModule struct { config UserModuleConfig cmd string // the built command } // NewUserModule builds and returns a UserModule object base on given config. func NewUserModule(config UserModuleConfig) *UserModule { cmd := "" switch config.Action { case UserActionAdd: cmd = useraddCmd // You have to use -m, otherwise no home directory will be created. 
If you want to specify the path of the home directory, use -d and specify the path // useradd -m -d /PATH/TO/FOLDER cmd += " -m" if config.Home != "" { cmd += " -d" + config.Home } // set user's login shell if config.Shell != "" { cmd = fmt.Sprintf("%s -s %s", cmd, config.Shell) } else { cmd = fmt.Sprintf("%s -s %s", cmd, defaultShell) } // set user's group if config.Group == "" { config.Group = config.Name } // groupadd -f groupAdd := fmt.Sprintf("%s -f %s", groupaddCmd, config.Group) // useradd -g cmd = fmt.Sprintf("%s -g %s %s", cmd, config.Group, config.Name) // prevent errors when username already in use cmd = fmt.Sprintf("id -u %s > /dev/null 2>&1 || (%s && %s)", config.Name, groupAdd, cmd) // add user to sudoers list if config.Sudoer { sudoLine := fmt.Sprintf("%s ALL=(ALL) NOPASSWD:ALL", config.Name) cmd = fmt.Sprintf("%s && %s", cmd, fmt.Sprintf("echo '%s' > /etc/sudoers.d/%s", sudoLine, config.Name)) } case UserActionDel: cmd = fmt.Sprintf("%s -r %s", userdelCmd, config.Name) // prevent errors when user does not exist cmd = fmt.Sprintf("%s || [ $? -eq 6 ]", cmd) // case UserActionModify: // cmd = usermodCmd } return &UserModule{ config: config, cmd: cmd, } } // Execute passes the command to executor and returns its results, the executor // should be already initialized. func (mod *UserModule) Execute(ctx context.Context, exec ctxt.Executor) ([]byte, []byte, error) { a, b, err := exec.Execute(ctx, mod.cmd, true) if err != nil { switch mod.config.Action { case UserActionAdd: return a, b, ErrUserAddFailed. Wrap(err, "Failed to create new system user '%s' on remote host", mod.config.Name) case UserActionDel: return a, b, ErrUserDeleteFailed. Wrap(err, "Failed to delete system user '%s' on remote host", mod.config.Name) } } return a, b, nil } tiup-1.16.3/pkg/cluster/module/wait_for.go000066400000000000000000000046521505422223000204550ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package module import ( "bytes" "context" "fmt" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" ) // WaitForConfig is the configurations of WaitFor module. type WaitForConfig struct { Port int // Port number to poll. Sleep time.Duration // Duration to sleep between checks, default 1 second. // Choices: // started // stopped // When checking a port started will ensure the port is open, stopped will check that it is closed State string Timeout time.Duration // Maximum duration to wait for. } // WaitFor is the module used to wait for some condition. type WaitFor struct { c WaitForConfig } // NewWaitFor create a WaitFor instance. func NewWaitFor(c WaitForConfig) *WaitFor { if c.Sleep == 0 { c.Sleep = time.Second } if c.Timeout == 0 { c.Timeout = time.Second * 60 } if c.State == "" { c.State = "started" } w := &WaitFor{ c: c, } return w } // Execute the module return nil if successfully wait for the event. 
func (w *WaitFor) Execute(ctx context.Context, e ctxt.Executor) (err error) { pattern := fmt.Appendf(nil, ":%d ", w.c.Port) retryOpt := utils.RetryOption{ Delay: w.c.Sleep, Timeout: w.c.Timeout, } if err := utils.Retry(func() error { // only listing TCP ports stdout, _, err := executor.UnwarpCheckPointExecutor(e).Execute(ctx, "ss -ltn", false) if err == nil { switch w.c.State { case "started": if bytes.Contains(stdout, pattern) { return nil } case "stopped": if !bytes.Contains(stdout, pattern) { return nil } } return errors.New("still waiting for port state to be satisfied") } return err }, retryOpt); err != nil { zap.L().Debug("retry error", zap.Error(err)) return errors.Errorf("timed out waiting for port %d to be %s after %s", w.c.Port, w.c.State, w.c.Timeout) } return nil } tiup-1.16.3/pkg/cluster/operation/000077500000000000000000000000001505422223000170205ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/operation/action.go000066400000000000000000000474351505422223000206410ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package operator

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/checkpoint"
	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/module"
	"github.com/pingcap/tiup/pkg/cluster/spec"
	logprinter "github.com/pingcap/tiup/pkg/logger/printer"
	"github.com/pingcap/tiup/pkg/set"
	"golang.org/x/sync/errgroup"
	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

var (
	// actionPrevMsgs maps an action verb to the progress message logged
	// before the action runs (e.g. "start" -> "Starting").
	actionPrevMsgs = map[string]string{
		"start":   "Starting",
		"stop":    "Stopping",
		"enable":  "Enabling",
		"disable": "Disabling",
	}
	// actionPostMsgs holds the title-cased verb used in success messages
	// (e.g. "start" -> "Start"); populated by init below.
	actionPostMsgs = map[string]string{}
)

func init() {
	// derive the success-message form from each known action verb
	for action := range actionPrevMsgs {
		actionPostMsgs[action] = cases.Title(language.English).String(action)
	}
}

// Enable will enable/disable the cluster
//
// Components are processed in start order, restricted by the role/node
// filters in options. Monitoring agents (node_exporter/blackbox_exporter)
// on a host are only enabled/disabled when every monitored instance on that
// host was covered by the filters (tracked via instCount below).
func Enable(
	ctx context.Context,
	cluster spec.Topology,
	options Options,
	isEnable bool,
) error {
	roleFilter := set.NewStringSet(options.Roles...)
	nodeFilter := set.NewStringSet(options.Nodes...)
	components := cluster.ComponentsByStartOrder()
	components = FilterComponent(components, roleFilter)
	monitoredOptions := cluster.GetMonitoredOptions()
	noAgentHosts := set.NewStringSet()
	systemdMode := string(cluster.BaseTopo().GlobalOptions.SystemdMode)

	// instCount counts monitored instances per host; it is decremented as
	// instances are processed, so hosts with a non-zero remainder still run
	// components that were not touched by this invocation.
	instCount := map[string]int{}
	cluster.IterInstance(func(inst spec.Instance) {
		if inst.IgnoreMonitorAgent() {
			noAgentHosts.Insert(inst.GetManageHost())
		} else {
			instCount[inst.GetManageHost()]++
		}
	})

	for _, comp := range components {
		insts := FilterInstance(comp.Instances(), nodeFilter)
		err := EnableComponent(ctx, insts, noAgentHosts, options, isEnable, systemdMode)
		if err != nil {
			return errors.Annotatef(err, "failed to enable/disable %s", comp.Name())
		}
		for _, inst := range insts {
			if !inst.IgnoreMonitorAgent() {
				instCount[inst.GetManageHost()]--
			}
		}
	}

	if monitoredOptions == nil {
		return nil
	}

	hosts := make([]string, 0)
	for host, count := range instCount {
		// don't disable the monitor component if the instance's host contain other components
		if count != 0 {
			continue
		}
		hosts = append(hosts, host)
	}

	return EnableMonitored(ctx, hosts, noAgentHosts, monitoredOptions, options.OptTimeout, isEnable, systemdMode)
}

// Start the cluster.
//
// When restoreLeader is true, each rolling-update-capable instance gets its
// PostRestart hook invoked (e.g. to restore leadership) after its component
// is started; those hooks run concurrently per component.
func Start(
	ctx context.Context,
	cluster spec.Topology,
	options Options,
	restoreLeader bool,
	tlsCfg *tls.Config,
) error {
	uniqueHosts := set.NewStringSet()
	roleFilter := set.NewStringSet(options.Roles...)
	nodeFilter := set.NewStringSet(options.Nodes...)
	components := cluster.ComponentsByStartOrder()
	components = FilterComponent(components, roleFilter)
	monitoredOptions := cluster.GetMonitoredOptions()
	noAgentHosts := set.NewStringSet()
	systemdMode := string(cluster.BaseTopo().GlobalOptions.SystemdMode)

	cluster.IterInstance(func(inst spec.Instance) {
		if inst.IgnoreMonitorAgent() {
			noAgentHosts.Insert(inst.GetManageHost())
		}
	})

	for _, comp := range components {
		insts := FilterInstance(comp.Instances(), nodeFilter)
		err := StartComponent(ctx, insts, noAgentHosts, options, tlsCfg, systemdMode)
		if err != nil {
			return errors.Annotatef(err, "failed to start %s", comp.Name())
		}

		errg, _ := errgroup.WithContext(ctx)
		for _, inst := range insts {
			if !inst.IgnoreMonitorAgent() {
				uniqueHosts.Insert(inst.GetManageHost())
			}
			if restoreLeader {
				rIns, ok := inst.(spec.RollingUpdateInstance)
				if ok {
					// checkpoint must be in a new context
					nctx := checkpoint.NewContext(ctx)
					errg.Go(func() error {
						err := rIns.PostRestart(nctx, cluster, tlsCfg, nil)
						// PostRestart failures are ignored when --force is set
						if err != nil && !options.Force {
							return err
						}
						return nil
					})
				}
			}
		}
		// wait for all PostRestart hooks of this component before moving on
		if err := errg.Wait(); err != nil {
			return err
		}
	}

	if monitoredOptions == nil {
		return nil
	}

	hosts := make([]string, 0, len(uniqueHosts))
	for host := range uniqueHosts {
		hosts = append(hosts, host)
	}
	return StartMonitored(ctx, hosts, noAgentHosts, monitoredOptions, options.OptTimeout, systemdMode)
}

// Stop the cluster.
//
// Mirrors Start but walks components in stop order; component stop errors
// are tolerated when --force is set. Monitoring agents are only stopped on
// hosts where every monitored instance was covered by the filters.
func Stop(
	ctx context.Context,
	cluster spec.Topology,
	options Options,
	evictLeader bool,
	tlsCfg *tls.Config,
) error {
	roleFilter := set.NewStringSet(options.Roles...)
	nodeFilter := set.NewStringSet(options.Nodes...)
	components := cluster.ComponentsByStopOrder()
	components = FilterComponent(components, roleFilter)
	monitoredOptions := cluster.GetMonitoredOptions()
	noAgentHosts := set.NewStringSet()
	systemdMode := string(cluster.BaseTopo().GlobalOptions.SystemdMode)

	// same per-host bookkeeping as in Enable, see comment there
	instCount := map[string]int{}
	cluster.IterInstance(func(inst spec.Instance) {
		if inst.IgnoreMonitorAgent() {
			noAgentHosts.Insert(inst.GetManageHost())
		} else {
			instCount[inst.GetManageHost()]++
		}
	})

	for _, comp := range components {
		insts := FilterInstance(comp.Instances(), nodeFilter)
		err := StopComponent(
			ctx,
			cluster,
			insts,
			noAgentHosts,
			options,
			true,
			evictLeader,
			tlsCfg,
		)
		if err != nil && !options.Force {
			return errors.Annotatef(err, "failed to stop %s", comp.Name())
		}
		for _, inst := range insts {
			if !inst.IgnoreMonitorAgent() {
				instCount[inst.GetManageHost()]--
			}
		}
	}

	if monitoredOptions == nil {
		return nil
	}

	hosts := make([]string, 0)
	for host, count := range instCount {
		// only stop agents on hosts with no remaining untouched instances
		if count != 0 {
			continue
		}
		hosts = append(hosts, host)
	}

	if err := StopMonitored(ctx, hosts, noAgentHosts, monitoredOptions, options.OptTimeout, systemdMode); err != nil && !options.Force {
		return err
	}
	return nil
}

// NeedCheckTombstone return true if we need to check and destroy some node.
func NeedCheckTombstone(topo *spec.Specification) bool {
	// any offline TiKV/TiFlash/Pump/Drainer node requires a tombstone check
	for _, s := range topo.TiKVServers {
		if s.Offline {
			return true
		}
	}
	for _, s := range topo.TiFlashServers {
		if s.Offline {
			return true
		}
	}
	for _, s := range topo.PumpServers {
		if s.Offline {
			return true
		}
	}
	for _, s := range topo.Drainers {
		if s.Offline {
			return true
		}
	}

	return false
}

// Restart the cluster.
func Restart(
	ctx context.Context,
	cluster spec.Topology,
	options Options,
	tlsCfg *tls.Config,
) error {
	// a restart is a full Stop followed by a full Start, without leader
	// eviction/restoration (both flags passed as false)
	err := Stop(ctx, cluster, options, false, tlsCfg)
	if err != nil {
		return errors.Annotatef(err, "failed to stop")
	}

	err = Start(ctx, cluster, options, false, tlsCfg)
	if err != nil {
		return errors.Annotatef(err, "failed to start")
	}

	return nil
}

// StartMonitored start BlackboxExporter and NodeExporter
func StartMonitored(ctx context.Context, hosts []string, noAgentHosts set.StringSet, options *spec.MonitoredOptions, timeout uint64, systemdMode string) error {
	return systemctlMonitor(ctx, hosts, noAgentHosts, options, "start", timeout, systemdMode)
}

// StopMonitored stop BlackboxExporter and NodeExporter
func StopMonitored(ctx context.Context, hosts []string, noAgentHosts set.StringSet, options *spec.MonitoredOptions, timeout uint64, systemdMode string) error {
	return systemctlMonitor(ctx, hosts, noAgentHosts, options, "stop", timeout, systemdMode)
}

// RestartMonitored stop BlackboxExporter and NodeExporter
func RestartMonitored(ctx context.Context, hosts []string, noAgentHosts set.StringSet, options *spec.MonitoredOptions, timeout uint64, systemdMode string) error {
	err := StopMonitored(ctx, hosts, noAgentHosts, options, timeout, systemdMode)
	if err != nil {
		return err
	}

	return StartMonitored(ctx, hosts, noAgentHosts, options, timeout, systemdMode)
}

// EnableMonitored enable/disable monitor service in a cluster
func EnableMonitored(ctx context.Context, hosts []string, noAgentHosts set.StringSet, options *spec.MonitoredOptions, timeout uint64, isEnable bool, systemdMode string) error {
	action := "disable"
	if isEnable {
		action = "enable"
	}

	return systemctlMonitor(ctx, hosts, noAgentHosts, options, action, timeout, systemdMode)
}

// systemctlMonitor runs the given systemctl action against the
// node_exporter and blackbox_exporter units on every host (in parallel per
// component), skipping hosts listed in noAgentHosts. For start/stop actions
// it additionally waits for the exporter port to open/close.
func systemctlMonitor(ctx context.Context, hosts []string, noAgentHosts set.StringSet, options *spec.MonitoredOptions, action string, timeout uint64, systemdMode string) error {
	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	ports := monitorPortMap(options)
	for _, comp := range []string{spec.ComponentNodeExporter, spec.ComponentBlackboxExporter} {
		logger.Infof("%s component %s", actionPrevMsgs[action], comp)

		errg, _ := errgroup.WithContext(ctx)
		for _, host := range hosts {
			if noAgentHosts.Exist(host) {
				logger.Debugf("Ignored %s component %s for %s", action, comp, host)
				continue
			}
			// checkpoint contexts can't be shared across goroutines
			nctx := checkpoint.NewContext(ctx)
			errg.Go(func() error {
				logger.Infof("\t%s instance %s", actionPrevMsgs[action], host)
				e := ctxt.GetInner(nctx).Get(host)
				// exporter units are named "<component>-<port>.service"
				service := fmt.Sprintf("%s-%d.service", comp, ports[comp])
				if err := systemctl(nctx, e, service, action, timeout, systemdMode); err != nil {
					return toFailedActionError(err, action, host, service, "")
				}

				// verify the port state actually changed for start/stop
				var err error
				switch action {
				case "start":
					err = spec.PortStarted(nctx, e, ports[comp], timeout)
				case "stop":
					err = spec.PortStopped(nctx, e, ports[comp], timeout)
				}

				if err != nil {
					return toFailedActionError(err, action, host, service, "")
				}
				logger.Infof("\t%s %s success", actionPostMsgs[action], host)
				return nil
			})
		}
		if err := errg.Wait(); err != nil {
			return err
		}
	}

	return nil
}

// restartInstance restarts one instance via systemd and waits until the
// instance reports itself ready.
func restartInstance(ctx context.Context, ins spec.Instance, timeout uint64, tlsCfg *tls.Config, systemdMode string) error {
	e := ctxt.GetInner(ctx).Get(ins.GetManageHost())
	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	logger.Infof("\tRestarting instance %s", ins.ID())

	if err := systemctl(ctx, e, ins.ServiceName(), "restart", timeout, systemdMode); err != nil {
		return toFailedActionError(err, "restart", ins.GetManageHost(), ins.ServiceName(), ins.LogDir())
	}

	// Check ready.
	if err := ins.Ready(ctx, e, timeout, tlsCfg); err != nil {
		return toFailedActionError(err, "restart", ins.GetManageHost(), ins.ServiceName(), ins.LogDir())
	}

	logger.Infof("\tRestart instance %s success", ins.ID())

	return nil
}

// enableInstance enables or disables one instance's systemd unit.
func enableInstance(ctx context.Context, ins spec.Instance, timeout uint64, isEnable bool, systemdMode string) error {
	e := ctxt.GetInner(ctx).Get(ins.GetManageHost())
	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)

	action := "disable"
	if isEnable {
		action = "enable"
	}
	logger.Infof("\t%s instance %s", actionPrevMsgs[action], ins.ID())

	// Enable/Disable by systemd.
	if err := systemctl(ctx, e, ins.ServiceName(), action, timeout, systemdMode); err != nil {
		return toFailedActionError(err, action, ins.GetManageHost(), ins.ServiceName(), ins.LogDir())
	}

	logger.Infof("\t%s instance %s success", actionPostMsgs[action], ins.ID())

	return nil
}

// startInstance starts one instance via systemd and waits until the
// instance reports itself ready.
func startInstance(ctx context.Context, ins spec.Instance, timeout uint64, tlsCfg *tls.Config, systemdMode string) error {
	e := ctxt.GetInner(ctx).Get(ins.GetManageHost())
	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	logger.Infof("\tStarting instance %s", ins.ID())

	if err := systemctl(ctx, e, ins.ServiceName(), "start", timeout, systemdMode); err != nil {
		return toFailedActionError(err, "start", ins.GetManageHost(), ins.ServiceName(), ins.LogDir())
	}

	// Check ready.
	if err := ins.Ready(ctx, e, timeout, tlsCfg); err != nil {
		return toFailedActionError(err, "start", ins.GetManageHost(), ins.ServiceName(), ins.LogDir())
	}

	logger.Infof("\tStart instance %s success", ins.ID())

	return nil
}

// systemctl builds and runs a SystemdModule command for the given unit and
// action (always with daemon-reload first), forwarding stdout to the console
// and classifying stderr: symlink creation/removal notices are ignored, a
// "not loaded" error on stop is downgraded to a warning (the unit simply
// doesn't exist), everything else is logged as an error.
func systemctl(ctx context.Context, executor ctxt.Executor, service string, action string, timeout uint64, scope string) error {
	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	c := module.SystemdModuleConfig{
		Unit:         service,
		ReloadDaemon: true,
		Action:       action,
		Timeout:      time.Second * time.Duration(timeout),
		Scope:        scope,
	}
	systemd := module.NewSystemdModule(c)
	stdout, stderr, err := systemd.Execute(ctx, executor)

	if len(stdout) > 0 {
		fmt.Println(string(stdout))
	}
	if len(stderr) > 0 && !bytes.Contains(stderr, []byte("Created symlink ")) &&
		!bytes.Contains(stderr, []byte("Removed symlink ")) {
		logger.Errorf("%s", string(stderr))
	}
	if len(stderr) > 0 && action == "stop" {
		// ignore "unit not loaded" error, as this means the unit is not
		// exist, and that's exactly what we want
		// NOTE: there will be a potential bug if the unit name is set
		// wrong and the real unit still remains started.
		if bytes.Contains(stderr, []byte(" not loaded.")) {
			logger.Warnf("%s", string(stderr))
			return nil // reset the error to avoid exiting
		}
		logger.Errorf("%s", string(stderr))
	}

	return err
}

// EnableComponent enable/disable the instances
//
// All instances of the component are processed concurrently; monitoring
// agent instances on noAgentHosts are skipped.
func EnableComponent(ctx context.Context, instances []spec.Instance, noAgentHosts set.StringSet, options Options, isEnable bool, systemdMode string) error {
	if len(instances) == 0 {
		return nil
	}

	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	name := instances[0].ComponentName()
	if isEnable {
		logger.Infof("Enabling component %s", name)
	} else {
		logger.Infof("Disabling component %s", name)
	}

	errg, _ := errgroup.WithContext(ctx)
	for _, ins := range instances {
		// skip certain instances
		switch name {
		case spec.ComponentNodeExporter,
			spec.ComponentBlackboxExporter:
			if noAgentHosts.Exist(ins.GetManageHost()) {
				logger.Debugf("Ignored enabling/disabling %s for %s:%d", name, ins.GetManageHost(), ins.GetPort())
				continue
			}
		}

		// the checkpoint part of context can't be shared between goroutines
		// since it's used to trace the stack, so we must create a new layer
		// of checkpoint context every time put it into a new goroutine.
		nctx := checkpoint.NewContext(ctx)
		errg.Go(func() error {
			err := enableInstance(nctx, ins, options.OptTimeout, isEnable, systemdMode)
			if err != nil {
				return err
			}
			return nil
		})
	}

	return errg.Wait()
}

// StartComponent start the instances.
func StartComponent(ctx context.Context, instances []spec.Instance, noAgentHosts set.StringSet, options Options, tlsCfg *tls.Config, systemdMode string) error { if len(instances) == 0 { return nil } logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) name := instances[0].ComponentName() logger.Infof("Starting component %s", name) // start instances in serial for Raft related components // eg: PD has more strict restrictions on the capacity expansion process, // that is, there should be only one node in the peer-join stage at most // ref https://github.com/tikv/pd/blob/d38b36714ccee70480c39e07126e3456b5fb292d/server/join/join.go#L179-L191 if options.Operation == ScaleOutOperation { switch name { case spec.ComponentPD, spec.ComponentTiFlash, spec.ComponentDMMaster: return serialStartInstances(ctx, instances, options, tlsCfg, systemdMode) } } errg, _ := errgroup.WithContext(ctx) for _, ins := range instances { switch name { case spec.ComponentNodeExporter, spec.ComponentBlackboxExporter: if noAgentHosts.Exist(ins.GetManageHost()) { logger.Debugf("Ignored starting %s for %s:%d", name, ins.GetManageHost(), ins.GetPort()) continue } } // the checkpoint part of context can't be shared between goroutines // since it's used to trace the stack, so we must create a new layer // of checkpoint context every time put it into a new goroutine. 
nctx := checkpoint.NewContext(ctx) errg.Go(func() error { if err := ins.PrepareStart(nctx, tlsCfg); err != nil { return err } return startInstance(nctx, ins, options.OptTimeout, tlsCfg, systemdMode) }) } return errg.Wait() } func serialStartInstances(ctx context.Context, instances []spec.Instance, options Options, tlsCfg *tls.Config, systemdMode string) error { for _, ins := range instances { if err := ins.PrepareStart(ctx, tlsCfg); err != nil { return err } if err := startInstance(ctx, ins, options.OptTimeout, tlsCfg, systemdMode); err != nil { return err } } return nil } func stopInstance(ctx context.Context, ins spec.Instance, timeout uint64, systemdMode string) error { e := ctxt.GetInner(ctx).Get(ins.GetManageHost()) logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) logger.Infof("\tStopping instance %s", ins.GetManageHost()) if err := systemctl(ctx, e, ins.ServiceName(), "stop", timeout, systemdMode); err != nil { return toFailedActionError(err, "stop", ins.GetManageHost(), ins.ServiceName(), ins.LogDir()) } logger.Infof("\tStop %s %s success", ins.ComponentName(), ins.ID()) return nil } // StopComponent stop the instances. 
// StopComponent stops all given instances of one component. Most instances
// are stopped concurrently; CDC instances are stopped strictly one by one
// (with a PreRestart drain unless forceStop), and monitoring agents on
// noAgentHosts are skipped. When evictLeader is set, rolling-update-capable
// instances get PreRestart invoked before the stop.
func StopComponent(ctx context.Context,
	topo spec.Topology,
	instances []spec.Instance,
	noAgentHosts set.StringSet,
	options Options,
	forceStop bool,
	evictLeader bool,
	tlsCfg *tls.Config,
) error {
	if len(instances) == 0 {
		return nil
	}

	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	name := instances[0].ComponentName()
	logger.Infof("Stopping component %s", name)
	systemdMode := string(topo.BaseTopo().GlobalOptions.SystemdMode)
	errg, _ := errgroup.WithContext(ctx)

	for _, ins := range instances {
		switch name {
		case spec.ComponentNodeExporter,
			spec.ComponentBlackboxExporter:
			if noAgentHosts.Exist(ins.GetManageHost()) {
				logger.Debugf("Ignored stopping %s for %s:%d", name, ins.GetManageHost(), ins.GetPort())
				continue
			}
		case spec.ComponentCDC:
			// CDC is handled synchronously here (not via errg), so nodes
			// stop strictly one after another
			nctx := checkpoint.NewContext(ctx)
			if !forceStop {
				// when scale-in cdc node, each node should be stopped one by one.
				cdc, ok := ins.(spec.RollingUpdateInstance)
				if !ok {
					panic("cdc should support rolling upgrade, but not")
				}
				err := cdc.PreRestart(nctx, topo, int(options.APITimeout), tlsCfg, nil)
				if err != nil {
					// this should never hit, since all errors swallowed to trigger hard stop.
					return err
				}
			}

			if err := stopInstance(nctx, ins, options.OptTimeout, systemdMode); err != nil {
				return err
			}
			// continue here, to skip the logic below.
			continue
		}

		// the checkpoint part of context can't be shared between goroutines
		// since it's used to trace the stack, so we must create a new layer
		// of checkpoint context every time put it into a new goroutine.
		nctx := checkpoint.NewContext(ctx)
		errg.Go(func() error {
			if evictLeader {
				rIns, ok := ins.(spec.RollingUpdateInstance)
				if ok {
					err := rIns.PreRestart(nctx, topo, int(options.APITimeout), tlsCfg, nil)
					if err != nil {
						return err
					}
				}
			}
			err := stopInstance(nctx, ins, options.OptTimeout, systemdMode)
			if err != nil {
				return err
			}
			return nil
		})
	}

	return errg.Wait()
}

// toFailedActionError formats the error msg for failed action
func toFailedActionError(err error, action string, host, service, logDir string) error {
	return errors.Annotatef(err,
		"failed to %s: %s %s, please check the instance's log(%s) for more detail.",
		action, host, service, logDir,
	)
}

// monitorPortMap returns the listen port of each monitoring agent component
// as configured in the monitored options.
func monitorPortMap(options *spec.MonitoredOptions) map[string]int {
	return map[string]int{
		spec.ComponentNodeExporter:     options.NodeExporterPort,
		spec.ComponentBlackboxExporter: options.BlackboxExporterPort,
	}
}

// executeSSHCommand runs a (non-sudo) shell command on the given host using
// the executor registered for it; an empty command is a no-op. stderr is
// attached to the returned error, stdout is logged.
func executeSSHCommand(ctx context.Context, action, host, command string) error {
	if command == "" {
		return nil
	}
	e, found := ctxt.GetInner(ctx).GetExecutor(host)
	if !found {
		return fmt.Errorf("no executor")
	}
	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	logger.Infof("\t%s on %s", action, host)
	stdout, stderr, err := e.Execute(ctx, command, false)
	if err != nil {
		return errors.Annotatef(err, "stderr: %s", string(stderr))
	}
	logger.Infof("\t%s", stdout)
	return nil
}
tiup-1.16.3/pkg/cluster/operation/check.go000066400000000000000000000715531505422223000204370ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package operator import ( "context" "encoding/json" "fmt" "math" "sort" "strconv" "strings" "github.com/AstroProfundis/sysinfo" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/module" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/insight" "go.uber.org/zap" ) // CheckOptions control the list of checks to be performed type CheckOptions struct { // checks that are disabled by default EnableCPU bool EnableMem bool EnableDisk bool // pre-defined goups of checks // GroupMinimal bool // a minimal set of checks } // Names of checks var ( CheckNameGeneral = "general" // errors that don't fit any specific check CheckNameNTP = "ntp" CheckNameChrony = "chrony" CheckNameOSVer = "os-version" CheckNameSwap = "swap" CheckNameSysctl = "sysctl" CheckNameCPUThreads = "cpu-cores" CheckNameCPUGovernor = "cpu-governor" CheckNameDisks = "disk" CheckNamePortListen = "listening-port" CheckNameEpoll = "epoll-exclusive" CheckNameMem = "memory" CheckNameNet = "network" CheckNameLimits = "limits" CheckNameSysService = "service" CheckNameSELinux = "selinux" CheckNameCommand = "command" CheckNameFio = "fio" CheckNameTHP = "thp" CheckNameDirPermission = "permission" CheckNameDirExist = "exist" CheckNameTimeZone = "timezone" ) // CheckResult is the result of a check type CheckResult struct { Name string // Name of the check Err error // An embedded error Warn bool // The check didn't pass, but not a big problem Msg string // A message or description } // Error implements the error interface func (c CheckResult) Error() string { return c.Err.Error() } // String returns a readable string of the error func (c CheckResult) String() string { return fmt.Sprintf("check failed for %s: %s", c.Name, c.Err) } // Unwrap implements the Wrapper interface func (c CheckResult) Unwrap() error { return c.Err } // IsWarning checks if the result is a warning error func (c CheckResult) IsWarning() bool { return c.Warn } 
// Passed checks if the result is a success func (c CheckResult) Passed() bool { return c.Err == nil } // CheckSystemInfo performs checks with basic system info func CheckSystemInfo(opt *CheckOptions, rawData []byte) []*CheckResult { var results []*CheckResult var insightInfo insight.Info if err := json.Unmarshal(rawData, &insightInfo); err != nil { return append(results, &CheckResult{ Name: CheckNameGeneral, Err: err, }) } // check basic system info results = append(results, checkSysInfo(opt, &insightInfo.SysInfo)...) // check time sync status switch { case insightInfo.ChronyStat.LeapStatus != "none": results = append(results, checkChrony(&insightInfo.ChronyStat)) case insightInfo.NTP.Status != "none": results = append(results, checkNTP(&insightInfo.NTP)) default: results = append(results, &CheckResult{ Name: CheckNameNTP, Err: fmt.Errorf("The NTPd daemon or Chronyd daemon may be not installed"), Warn: true, }, ) } epollResult := &CheckResult{ Name: CheckNameEpoll, } if !insightInfo.EpollExcl { epollResult.Err = fmt.Errorf("epoll exclusive is not supported") } results = append(results, epollResult) return results } func checkSysInfo(opt *CheckOptions, sysInfo *sysinfo.SysInfo) []*CheckResult { var results []*CheckResult results = append(results, checkOSInfo(opt, &sysInfo.OS)) // check cpu capacities results = append(results, checkCPU(opt, &sysInfo.CPU)...) // check memory size results = append(results, checkMem(opt, &sysInfo.Memory)...) // check network results = append(results, checkNetwork(opt, sysInfo.Network)...) return results } // Try to keep this in sync with // https://docs.pingcap.com/tidb/stable/hardware-and-software-requirements#os-and-platform-requirements // // This information is in most cases based on the `ID` (Vendor) and `VERSION_ID` (Release) of /etc/os-release // See https://github.com/AstroProfundis/sysinfo/blob/tiup/os.go for details. 
func checkOSInfo(opt *CheckOptions, osInfo *sysinfo.OS) *CheckResult { result := &CheckResult{ Name: CheckNameOSVer, Msg: fmt.Sprintf("OS is %s %s", osInfo.Name, osInfo.Release), } // check OS vendor switch osInfo.Vendor { case "kylin": // VERSION_ID="V10" if ver, _ := strconv.ParseFloat(strings.Trim(osInfo.Version, "V"), 64); ver < 10 { result.Err = fmt.Errorf("%s %s not supported, use version V10 or higher", osInfo.Name, osInfo.Release) return result } case "amzn": // https://aws.amazon.com/linux/amazon-linux-2023/ if osInfo.Version == "2023" { return result } // Amazon Linux 2 is based on CentOS 7 and is recommended for // AWS Graviton 2 (ARM64) deployments. // https://aws.amazon.com/amazon-linux-2/ if ver, _ := strconv.ParseFloat(osInfo.Version, 64); ver < 2 || ver >= 3 { result.Err = fmt.Errorf("%s %s not supported, use Amazon Linux 2 or Amazon Linux 2023 please", osInfo.Name, osInfo.Release) return result } case "centos": // CentOS Linux is EOL // CentOS Stream 9 and newer is still fine if ver, _ := strconv.ParseFloat(osInfo.Version, 64); ver < 9 { result.Err = fmt.Errorf("%s %s not supported, use version 9 or higher", osInfo.Name, osInfo.Release) return result } case "redhat", "rhel", "ol": // RHEL 8.4 or newer 8.x versions are supported if ver, _ := strconv.ParseFloat(osInfo.Version, 64); ver < 8.4 || ver >= 9 { result.Err = fmt.Errorf("%s %s not supported, use version 8.4 or a later 8.x version please", osInfo.Name, osInfo.Release) return result } case "rocky": // Rocky Linux if ver, _ := strconv.ParseFloat(osInfo.Version, 64); ver < 9.1 { result.Err = fmt.Errorf("%s %s not supported, use version 9.1 or later please", osInfo.Name, osInfo.Release) return result } case "debian": // debian support is not fully tested, but we suppose it should work msg := "Debian support is not fully tested, be careful" result.Err = fmt.Errorf("%s (%s)", result.Msg, msg) result.Warn = true if ver, _ := strconv.ParseFloat(osInfo.Version, 64); ver < 10 { result.Err = 
fmt.Errorf("%s %s not supported, use version 10 or higher (%s)", osInfo.Name, osInfo.Release, msg) result.Warn = false return result } case "ubuntu": // ubuntu support is not fully tested, but we suppose it should work msg := "Ubuntu support is not fully tested, be careful" result.Err = fmt.Errorf("%s (%s)", result.Msg, msg) result.Warn = true if ver, _ := strconv.ParseFloat(osInfo.Version, 64); ver < 20.04 { result.Err = fmt.Errorf("%s %s not supported, use version 20.04 or higher (%s)", osInfo.Name, osInfo.Release, msg) result.Warn = false return result } case "openEuler": return result default: result.Err = fmt.Errorf("OS vendor %s not supported", osInfo.Vendor) return result } // TODO: check OS architecture return result } func checkNTP(ntpInfo *insight.TimeStat) *CheckResult { result := &CheckResult{ Name: CheckNameNTP, } if ntpInfo.Status == "none" { zap.L().Info("The NTPd daemon may be not installed, skip.") return result } if ntpInfo.Sync == "none" { result.Err = fmt.Errorf("The NTPd daemon may be not start") result.Warn = true return result } // check if time offset greater than +- 500ms if math.Abs(ntpInfo.Offset) >= 500 { result.Err = fmt.Errorf("time offset %fms too high", ntpInfo.Offset) } return result } func checkChrony(chronyInfo *insight.ChronyStat) *CheckResult { result := &CheckResult{ Name: CheckNameChrony, } if chronyInfo.LeapStatus == "none" { zap.L().Info("The Chrony daemon may be not installed, skip.") return result } // check if time offset greater than +- 500ms if math.Abs(chronyInfo.LastOffset) >= 500 { result.Err = fmt.Errorf("time offset %fms too high", chronyInfo.LastOffset) } return result } func checkCPU(opt *CheckOptions, cpuInfo *sysinfo.CPU) []*CheckResult { var results []*CheckResult if opt.EnableCPU && cpuInfo.Threads < 16 { results = append(results, &CheckResult{ Name: CheckNameCPUThreads, Err: fmt.Errorf("CPU thread count %d too low, needs 16 or more", cpuInfo.Threads), }) } else { results = append(results, &CheckResult{ Name: 
CheckNameCPUThreads, Msg: fmt.Sprintf("number of CPU cores / threads: %d", cpuInfo.Threads), }) } // check for CPU frequency governor if cpuInfo.Governor != "" { if cpuInfo.Governor != "performance" { results = append(results, &CheckResult{ Name: CheckNameCPUGovernor, Err: fmt.Errorf("CPU frequency governor is %s, should use performance", cpuInfo.Governor), }) } else { results = append(results, &CheckResult{ Name: CheckNameCPUGovernor, Msg: fmt.Sprintf("CPU frequency governor is %s", cpuInfo.Governor), }) } } else { results = append(results, &CheckResult{ Name: CheckNameCPUGovernor, Err: fmt.Errorf("Unable to determine current CPU frequency governor policy"), Warn: true, }) } return results } func checkMem(opt *CheckOptions, memInfo *sysinfo.Memory) []*CheckResult { var results []*CheckResult if memInfo.Swap > 0 { results = append(results, &CheckResult{ Name: CheckNameSwap, Warn: true, Err: fmt.Errorf("swap is enabled, please disable it for best performance"), }) } // 32GB if opt.EnableMem && memInfo.Size < 1024*32 { results = append(results, &CheckResult{ Name: CheckNameMem, Err: fmt.Errorf("memory size %dMB too low, needs 32GB or more", memInfo.Size), }) } else { results = append(results, &CheckResult{ Name: CheckNameMem, Msg: fmt.Sprintf("memory size is %dMB", memInfo.Size), }) } return results } func checkNetwork(opt *CheckOptions, networkDevices []sysinfo.NetworkDevice) []*CheckResult { var results []*CheckResult for _, netdev := range networkDevices { // ignore the network devices that cannot be detected if netdev.Speed == 0 { continue } if netdev.Speed >= 1000 { results = append(results, &CheckResult{ Name: CheckNameNet, Msg: fmt.Sprintf("network speed of %s is %dMB", netdev.Name, netdev.Speed), }) } else { results = append(results, &CheckResult{ Name: CheckNameNet, Err: fmt.Errorf("network speed of %s is %dMB too low, needs 1GB or more", netdev.Name, netdev.Speed), }) } } return results } // CheckSysLimits checks limits in /etc/security/limits.conf func 
CheckSysLimits(opt *CheckOptions, user string, l []byte) []*CheckResult { var results []*CheckResult var ( stackSoft int nofileSoft int nofileHard int ) for line := range strings.SplitSeq(string(l), "\n") { line = strings.TrimSpace(line) if strings.HasPrefix(line, "#") { continue } fields := strings.Fields(line) if len(fields) < 3 || fields[0] != user { continue } switch fields[2] { case "nofile": if fields[1] == "soft" { nofileSoft, _ = strconv.Atoi(fields[3]) } else { nofileHard, _ = strconv.Atoi(fields[3]) } case "stack": if fields[1] == "soft" { stackSoft, _ = strconv.Atoi(fields[3]) } } } if nofileSoft < 1000000 { results = append(results, &CheckResult{ Name: CheckNameLimits, Err: fmt.Errorf("soft limit of 'nofile' for user '%s' is not set or too low", user), Msg: fmt.Sprintf("%s soft nofile 1000000", user), }) } if nofileHard < 1000000 { results = append(results, &CheckResult{ Name: CheckNameLimits, Err: fmt.Errorf("hard limit of 'nofile' for user '%s' is not set or too low", user), Msg: fmt.Sprintf("%s hard nofile 1000000", user), }) } if stackSoft < 10240 { results = append(results, &CheckResult{ Name: CheckNameLimits, Err: fmt.Errorf("soft limit of 'stack' for user '%s' is not set or too low", user), Msg: fmt.Sprintf("%s soft stack 10240", user), }) } // all pass if len(results) < 1 { results = append(results, &CheckResult{ Name: CheckNameLimits, }) } return results } // CheckKernelParameters checks kernel parameter values func CheckKernelParameters(opt *CheckOptions, p []byte) []*CheckResult { var results []*CheckResult for line := range strings.SplitSeq(string(p), "\n") { line = strings.TrimSpace(line) fields := strings.Fields(line) if len(fields) < 3 { continue } switch fields[0] { case "fs.file-max": val, _ := strconv.Atoi(fields[2]) if val < 1000000 { results = append(results, &CheckResult{ Name: CheckNameSysctl, Err: fmt.Errorf("fs.file-max = %d, should be greater than 1000000", val), Msg: "fs.file-max = 1000000", }) } case "net.core.somaxconn": val, 
_ := strconv.Atoi(fields[2]) if val < 32768 { results = append(results, &CheckResult{ Name: CheckNameSysctl, Err: fmt.Errorf("net.core.somaxconn = %d, should 32768 or greater", val), Msg: "net.core.somaxconn = 32768", }) } case "net.ipv4.tcp_tw_recycle": val, _ := strconv.Atoi(fields[2]) if val != 0 { results = append(results, &CheckResult{ Name: CheckNameSysctl, Err: fmt.Errorf("net.ipv4.tcp_tw_recycle = %d, should be 0", val), Msg: "net.ipv4.tcp_tw_recycle = 0", }) } case "net.ipv4.tcp_syncookies": val, _ := strconv.Atoi(fields[2]) if val != 0 { results = append(results, &CheckResult{ Name: CheckNameSysctl, Err: fmt.Errorf("net.ipv4.tcp_syncookies = %d, should be 0", val), Msg: "net.ipv4.tcp_syncookies = 0", }) } case "vm.overcommit_memory": val, _ := strconv.Atoi(fields[2]) if opt.EnableMem && val != 0 && val != 1 { results = append(results, &CheckResult{ Name: CheckNameSysctl, Err: fmt.Errorf("vm.overcommit_memory = %d, should be 0 or 1", val), Msg: "vm.overcommit_memory = 1", }) } case "vm.swappiness": val, _ := strconv.Atoi(fields[2]) if val != 0 { results = append(results, &CheckResult{ Name: CheckNameSysctl, Err: fmt.Errorf("vm.swappiness = %d, should be 0", val), Msg: "vm.swappiness = 0", }) } } } // all pass if len(results) < 1 { results = append(results, &CheckResult{ Name: CheckNameSysctl, }) } return results } // CheckServices checks if a service is running on the host func CheckServices(ctx context.Context, e ctxt.Executor, host, service string, disable bool, systemdMode spec.SystemdMode) *CheckResult { result := &CheckResult{ Name: CheckNameSysService, } // check if the service exist before checking its status, ignore when non-exist stdout, _, err := e.Execute( ctx, fmt.Sprintf( "systemctl list-unit-files --type service | grep -i %s.service | wc -l", service), systemdMode != spec.UserMode) if err != nil { result.Err = err return result } if cnt, _ := strconv.Atoi(strings.Trim(string(stdout), "\n")); cnt == 0 { if !disable { result.Err = 
fmt.Errorf("service %s not found, should be installed and started", service) } result.Msg = fmt.Sprintf("service %s not found, ignore", service) return result } // The service checked here needs to use systemctl in system mode, so the value passed by scope is empty. active, _, _, err := GetServiceStatus(ctx, e, service+".service", "", string(systemdMode)) if err != nil { result.Err = err } switch disable { case false: if active != "active" { result.Err = fmt.Errorf("service %s is not running", service) result.Msg = fmt.Sprintf("start %s.service", service) } case true: if active == "active" { result.Err = fmt.Errorf("service %s is running but should be stopped", service) result.Msg = fmt.Sprintf("stop %s.service", service) } } return result } // CheckSELinux checks if SELinux is enabled on the host func CheckSELinux(ctx context.Context, e ctxt.Executor, sudo bool) *CheckResult { result := &CheckResult{ Name: CheckNameSELinux, } m := module.NewShellModule(module.ShellModuleConfig{ // ignore grep errors, the file may not exist for some systems Command: "grep -E '^\\s*SELINUX=enforcing' /etc/selinux/config 2>/dev/null | wc -l", Sudo: sudo, }) stdout, stderr, err := m.Execute(ctx, e) if err != nil { result.Err = fmt.Errorf("%w %s", err, stderr) return result } out := strings.Trim(string(stdout), "\n") lines, err := strconv.Atoi(out) if err != nil { result.Err = fmt.Errorf("can not check SELinux status, please validate manually, %s", err) result.Warn = true return result } if lines > 0 { result.Err = fmt.Errorf("SELinux is not disabled") } else { result.Msg = "SELinux is disabled" } return result } // CheckListeningPort checks if the ports are already binded by some process on host func CheckListeningPort(opt *CheckOptions, host string, topo *spec.Specification, rawData []byte) []*CheckResult { var results []*CheckResult ports := make(map[int]struct{}) topo.IterInstance(func(inst spec.Instance) { if inst.GetManageHost() != host { return } for _, up := range 
inst.UsedPorts() { if _, found := ports[up]; !found { ports[up] = struct{}{} } } }) for p := range ports { for line := range strings.SplitSeq(string(rawData), "\n") { fields := strings.Fields(line) if len(fields) < 5 || fields[0] != "LISTEN" { continue } addr := strings.Split(fields[3], ":") lp, _ := strconv.Atoi(addr[len(addr)-1]) if p == lp { results = append(results, &CheckResult{ Name: CheckNamePortListen, Err: fmt.Errorf("port %d is already in use", lp), }) break // ss may report multiple entries for the same port } } } return results } // CheckPartitions checks partition info of data directories func CheckPartitions(opt *CheckOptions, host string, topo *spec.Specification, rawData []byte) []*CheckResult { var results []*CheckResult var insightInfo insight.Info if err := json.Unmarshal(rawData, &insightInfo); err != nil { return append(results, &CheckResult{ Name: CheckNameDisks, Err: err, }) } flt := flatPartitions(insightInfo.Partitions) parts := sortPartitions(flt) // check if multiple instances are using the same partition as data storage type storePartitionInfo struct { comp string path string } uniqueStores := make(map[string][]storePartitionInfo) // host+partition -> info topo.IterInstance(func(inst spec.Instance) { if inst.GetManageHost() != host { return } for _, dataDir := range spec.MultiDirAbs(topo.GlobalOptions.User, inst.DataDir()) { if dataDir == "" { continue } blk := getDisk(parts, dataDir) if blk == nil { return } // only check for TiKV and TiFlash, other components are not that I/O sensitive switch inst.ComponentName() { case spec.ComponentTiKV, spec.ComponentTiFlash: usKey := fmt.Sprintf("%s:%s", host, blk.Mount.MountPoint) uniqueStores[usKey] = append(uniqueStores[usKey], storePartitionInfo{ comp: inst.ComponentName(), path: dataDir, }) } switch blk.Mount.FSType { case "ext4": if !strings.Contains(blk.Mount.Options, "nodelalloc") { results = append(results, &CheckResult{ Name: CheckNameDisks, Err: fmt.Errorf("mount point %s does not have 
'nodelalloc' option set", blk.Mount.MountPoint), }) } fallthrough case "xfs": if !strings.Contains(blk.Mount.Options, "noatime") { results = append(results, &CheckResult{ Name: CheckNameDisks, Err: fmt.Errorf("mount point %s does not have 'noatime' option set", blk.Mount.MountPoint), Warn: true, }) } default: results = append(results, &CheckResult{ Name: CheckNameDisks, Err: fmt.Errorf("mount point %s has an unsupported filesystem '%s'", blk.Mount.MountPoint, blk.Mount.FSType), }) } } }) for key, parts := range uniqueStores { if len(parts) > 1 { pathList := make([]string, 0) for _, p := range parts { pathList = append(pathList, fmt.Sprintf("%s:%s", p.comp, p.path), ) } results = append(results, &CheckResult{ Name: CheckNameDisks, Err: fmt.Errorf( "multiple components %s are using the same partition %s as data dir", strings.Join(pathList, ","), key, ), }) } } return results } func flatPartitions(parts []insight.BlockDev) []insight.BlockDev { var flatBlk []insight.BlockDev for _, blk := range parts { if len(blk.SubDev) > 0 { flatBlk = append(flatBlk, flatPartitions(blk.SubDev)...) 
} // blocks with empty mount points are ignored if blk.Mount.MountPoint != "" { flatBlk = append(flatBlk, blk) } } return flatBlk } func sortPartitions(parts []insight.BlockDev) []insight.BlockDev { // The longest mount point is at top of the list sort.Slice(parts, func(i, j int) bool { return len(parts[i].Mount.MountPoint) > len(parts[j].Mount.MountPoint) }) return parts } // getDisk find the first block dev from the list that matches the given path func getDisk(parts []insight.BlockDev, fullpath string) *insight.BlockDev { for _, blk := range parts { if strings.HasPrefix(fullpath, blk.Mount.MountPoint) { return &blk } } return nil } // CheckFIOResult parses and checks the result of fio test func CheckFIOResult(rr, rw, lat []byte) []*CheckResult { var results []*CheckResult // check results for rand read test var rrRes map[string]any if err := json.Unmarshal(rr, &rrRes); err != nil { results = append(results, &CheckResult{ Name: CheckNameFio, Err: fmt.Errorf("error parsing result of random read test, %s", err), }) } else if jobs, ok := rrRes["jobs"]; ok { readRes := jobs.([]any)[0].(map[string]any)["read"] readIOPS := readRes.(map[string]any)["iops"] results = append(results, &CheckResult{ Name: CheckNameFio, Msg: fmt.Sprintf("IOPS of random read: %f", readIOPS.(float64)), }) } else { results = append(results, &CheckResult{ Name: CheckNameFio, Err: fmt.Errorf("error parsing result of random read test"), }) } // check results for rand read write var rwRes map[string]any if err := json.Unmarshal(rw, &rwRes); err != nil { results = append(results, &CheckResult{ Name: CheckNameFio, Err: fmt.Errorf("error parsing result of random read write test, %s", err), }) } else if jobs, ok := rwRes["jobs"]; ok { readRes := jobs.([]any)[0].(map[string]any)["read"] readIOPS := readRes.(map[string]any)["iops"] writeRes := jobs.([]any)[0].(map[string]any)["write"] writeIOPS := writeRes.(map[string]any)["iops"] results = append(results, &CheckResult{ Name: CheckNameFio, Msg: 
fmt.Sprintf("IOPS of random read: %f, write: %f", readIOPS.(float64), writeIOPS.(float64)), }) } else { results = append(results, &CheckResult{ Name: CheckNameFio, Err: fmt.Errorf("error parsing result of random read write test"), }) } // check results for read write latency var latRes map[string]any if err := json.Unmarshal(lat, &latRes); err != nil { results = append(results, &CheckResult{ Name: CheckNameFio, Err: fmt.Errorf("error parsing result of read write latency test, %s", err), }) } else if jobs, ok := latRes["jobs"]; ok { readRes := jobs.([]any)[0].(map[string]any)["read"] readLat := readRes.(map[string]any)["lat_ns"] readLatAvg := readLat.(map[string]any)["mean"] writeRes := jobs.([]any)[0].(map[string]any)["write"] writeLat := writeRes.(map[string]any)["lat_ns"] writeLatAvg := writeLat.(map[string]any)["mean"] results = append(results, &CheckResult{ Name: CheckNameFio, Msg: fmt.Sprintf("Latency of random read: %fns, write: %fns", readLatAvg.(float64), writeLatAvg.(float64)), }) } else { results = append(results, &CheckResult{ Name: CheckNameFio, Err: fmt.Errorf("error parsing result of read write latency test"), }) } return results } // CheckTHP checks THP in /sys/kernel/mm/transparent_hugepage/enabled func CheckTHP(ctx context.Context, e ctxt.Executor, sudo bool) *CheckResult { result := &CheckResult{ Name: CheckNameTHP, } m := module.NewShellModule(module.ShellModuleConfig{ Command: fmt.Sprintf(`if [ -d %[1]s ]; then cat %[1]s/enabled; fi`, "/sys/kernel/mm/transparent_hugepage"), Sudo: sudo, }) stdout, stderr, err := m.Execute(ctx, e) if err != nil { result.Err = fmt.Errorf("%w %s", err, stderr) return result } for line := range strings.SplitSeq(strings.Trim(string(stdout), "\n"), "\n") { if len(line) > 0 && !strings.Contains(line, "[never]") { result.Err = fmt.Errorf("THP is enabled, please disable it for best performance") return result } } result.Msg = "THP is disabled" return result } // CheckJRE checks if java command is available for TiSpark 
nodes func CheckJRE(ctx context.Context, e ctxt.Executor, host string, topo *spec.Specification) []*CheckResult { var results []*CheckResult topo.IterInstance(func(inst spec.Instance) { if inst.ComponentName() != spec.ComponentTiSpark { return } // check if java cli is available // the checkpoint part of context can't be shared between goroutines stdout, stderr, err := e.Execute(checkpoint.NewContext(ctx), "java -version", false) if err != nil { results = append(results, &CheckResult{ Name: CheckNameCommand, Err: fmt.Errorf("java not usable, %s", strings.Trim(string(stderr), "\n")), Msg: "JRE is not installed properly or not set in PATH", }) return } if len(stderr) > 0 { // java -version returns as below: // openjdk version "1.8.0_265" // openjdk version "11.0.8" 2020-07-14 line := strings.Split(string(stderr), "\n")[0] fields := strings.Split(line, `"`) ver := strings.TrimSpace(fields[1]) if strings.Compare(ver, "1.8") < 0 { results = append(results, &CheckResult{ Name: CheckNameCommand, Err: fmt.Errorf("java version %s is not supported, use Java 8 (1.8)+", ver), Msg: "Installed JRE is not Java 8+", }) } else { results = append(results, &CheckResult{ Name: CheckNameCommand, Msg: "java: " + strings.Split(string(stderr), "\n")[0], }) } } else { results = append(results, &CheckResult{ Name: CheckNameCommand, Err: fmt.Errorf("unknown output of java %s", stdout), Msg: "java: " + strings.Split(string(stdout), "\n")[0], Warn: true, }) } }) return results } // CheckDirPermission checks if the user can write to given path func CheckDirPermission(ctx context.Context, e ctxt.Executor, user, path string) []*CheckResult { var results []*CheckResult _, stderr, err := e.Execute(ctx, fmt.Sprintf( "/usr/bin/sudo -u %[1]s touch %[2]s/.tiup_cluster_check_file && rm -f %[2]s/.tiup_cluster_check_file", user, path, ), false) if err != nil || len(stderr) > 0 { results = append(results, &CheckResult{ Name: CheckNameDirPermission, Err: fmt.Errorf("unable to write to dir %s: %s", path, 
strings.Split(string(stderr), "\n")[0]), Msg: fmt.Sprintf("%s: %s", path, err), }) } else { results = append(results, &CheckResult{ Name: CheckNameDirPermission, Msg: fmt.Sprintf("%s is writable", path), }) } return results } // CheckDirIsExist check if the directory exists func CheckDirIsExist(ctx context.Context, e ctxt.Executor, path string) []*CheckResult { var results []*CheckResult if path == "" { return results } req, _, _ := e.Execute(ctx, fmt.Sprintf( "[ -e %s ] && echo 1", path, ), false) if strings.ReplaceAll(string(req), "\n", "") == "1" { results = append(results, &CheckResult{ Name: CheckNameDirExist, Err: fmt.Errorf("%s already exists", path), Msg: fmt.Sprintf("%s already exists", path), }) } return results } // CheckTimeZone performs checks if time zone is the same func CheckTimeZone(ctx context.Context, topo *spec.Specification, host string, rawData []byte) []*CheckResult { var results []*CheckResult var insightInfo, pd0insightInfo insight.Info if err := json.Unmarshal(rawData, &insightInfo); err != nil { return append(results, &CheckResult{ Name: CheckNameTimeZone, Err: err, }) } if len(topo.PDServers) < 1 { return append(results, &CheckResult{ Name: CheckNameTimeZone, Err: fmt.Errorf("no pd found"), }) } // skip compare with itself if topo.PDServers[0].Host == host { return nil } pd0stdout, _, _ := ctxt.GetInner(ctx).GetOutputs(topo.PDServers[0].Host) if err := json.Unmarshal(pd0stdout, &pd0insightInfo); err != nil { return append(results, &CheckResult{ Name: CheckNameTimeZone, Err: err, }) } timezone := insightInfo.SysInfo.Node.Timezone pd0timezone := pd0insightInfo.SysInfo.Node.Timezone if timezone == pd0timezone { results = append(results, &CheckResult{ Name: CheckNameTimeZone, Msg: "time zone is the same as the first PD machine: " + timezone, }) } else { results = append(results, &CheckResult{ Name: CheckNameTimeZone, Err: fmt.Errorf("time zone is %s, but the firt PD is %s", timezone, pd0timezone), }) } return results } 
tiup-1.16.3/pkg/cluster/operation/destroy.go000066400000000000000000000475011505422223000210470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package operator import ( "bytes" "context" "crypto/tls" "fmt" "os" "path/filepath" "strings" "time" "github.com/fatih/color" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/cluster/module" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" ) // Destroy the cluster. 
func Destroy( ctx context.Context, cluster spec.Topology, options Options, ) error { coms := cluster.ComponentsByStopOrder() instCount := map[string]int{} cluster.IterInstance(func(inst spec.Instance) { instCount[inst.GetManageHost()]++ }) for _, com := range coms { insts := com.Instances() err := DestroyComponent(ctx, insts, cluster, options) if err != nil && !options.Force { return perrs.Annotatef(err, "failed to destroy %s", com.Name()) } for _, inst := range insts { instCount[inst.GetManageHost()]-- if instCount[inst.GetManageHost()] == 0 { if cluster.GetMonitoredOptions() != nil { if err := DestroyMonitored(ctx, inst, cluster.GetMonitoredOptions(), options.OptTimeout, cluster.BaseTopo().GlobalOptions.SystemdMode); err != nil && !options.Force { return err } } } } } gOpts := cluster.BaseTopo().GlobalOptions // Delete all global deploy directory for host := range instCount { if err := DeleteGlobalDirs(ctx, host, gOpts); err != nil { return nil } } // after all things done, try to remove SSH public key for host := range instCount { if err := DeletePublicKey(ctx, host); err != nil { return nil } } return nil } // StopAndDestroyInstance stop and destroy the instance, // if this instance is the host's last one, and the host has monitor deployed, // we need to destroy the monitor, too func StopAndDestroyInstance( ctx context.Context, cluster spec.Topology, instance spec.Instance, options Options, forceStop bool, destroyNode bool, tlsCfg *tls.Config, ) error { ignoreErr := options.Force compName := instance.ComponentName() noAgentHosts := set.NewStringSet() logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) cluster.IterInstance(func(inst spec.Instance) { if inst.IgnoreMonitorAgent() { noAgentHosts.Insert(inst.GetManageHost()) } }) // just try to stop and destroy if err := StopComponent( ctx, cluster, []spec.Instance{instance}, noAgentHosts, options, forceStop, /* forceStop */ false, /* evictLeader */ tlsCfg, /* when the forceStop is false, this is 
use for TiCDC graceful shutdown */ ); err != nil { if !ignoreErr { return perrs.Annotatef(err, "failed to stop %s", compName) } logger.Warnf("failed to stop %s: %v", compName, err) } if err := DestroyComponent(ctx, []spec.Instance{instance}, cluster, options); err != nil { if !ignoreErr { return perrs.Annotatef(err, "failed to destroy %s", compName) } logger.Warnf("failed to destroy %s: %v", compName, err) } if destroyNode { // monitoredOptions for dm cluster is nil monitoredOptions := cluster.GetMonitoredOptions() if monitoredOptions != nil && !instance.IgnoreMonitorAgent() { if err := StopMonitored(ctx, []string{instance.GetManageHost()}, noAgentHosts, monitoredOptions, options.OptTimeout, string(cluster.BaseTopo().GlobalOptions.SystemdMode)); err != nil { if !ignoreErr { return perrs.Annotatef(err, "failed to stop monitor") } logger.Warnf("failed to stop %s: %v", "monitor", err) } if err := DestroyMonitored(ctx, instance, monitoredOptions, options.OptTimeout, cluster.BaseTopo().GlobalOptions.SystemdMode); err != nil { if !ignoreErr { return perrs.Annotatef(err, "failed to destroy monitor") } logger.Warnf("failed to destroy %s: %v", "monitor", err) } } if err := DeletePublicKey(ctx, instance.GetManageHost()); err != nil { if !ignoreErr { return perrs.Annotatef(err, "failed to delete public key") } logger.Warnf("failed to delete public key") } } return nil } // DeleteGlobalDirs deletes all global directories if they are empty func DeleteGlobalDirs(ctx context.Context, host string, options *spec.GlobalOptions) error { if options == nil { return nil } e := ctxt.GetInner(ctx).Get(host) logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) logger.Infof("Clean global directories %s", host) for _, dir := range []string{options.LogDir, options.DeployDir, options.DataDir} { if dir == "" { continue } dir = spec.Abs(options.User, dir) logger.Infof("\tClean directory %s on instance %s", dir, host) c := module.ShellModuleConfig{ Command: fmt.Sprintf("rmdir %s 
> /dev/null 2>&1 || true", dir), Chdir: "", UseShell: false, } shell := module.NewShellModule(c) stdout, stderr, err := shell.Execute(ctx, e) if len(stdout) > 0 { fmt.Println(string(stdout)) } if len(stderr) > 0 { logger.Errorf("%s", string(stderr)) } if err != nil { return perrs.Annotatef(err, "failed to clean directory %s on: %s", dir, host) } } logger.Infof("Clean global directories %s success", host) return nil } // DeletePublicKey deletes the SSH public key from host func DeletePublicKey(ctx context.Context, host string) error { e := ctxt.GetInner(ctx).Get(host) logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) logger.Infof("Delete public key %s", host) _, pubKeyPath := ctxt.GetInner(ctx).GetSSHKeySet() publicKey, err := os.ReadFile(pubKeyPath) if err != nil { return perrs.Trace(err) } pubKey := string(bytes.TrimSpace(publicKey)) pubKey = strings.ReplaceAll(pubKey, "/", "\\/") pubKeysFile := executor.FindSSHAuthorizedKeysFile(ctx, e) // delete the public key with Linux `sed` toolkit c := module.ShellModuleConfig{ Command: fmt.Sprintf("sed -i '/%s/d' %s", pubKey, pubKeysFile), UseShell: false, } shell := module.NewShellModule(c) stdout, stderr, err := shell.Execute(ctx, e) if len(stdout) > 0 { fmt.Println(string(stdout)) } if len(stderr) > 0 { logger.Errorf("%s", string(stderr)) } if err != nil { return perrs.Annotatef(err, "failed to delete pulblic key on: %s", host) } logger.Infof("Delete public key %s success", host) return nil } // DestroyMonitored destroy the monitored service. func DestroyMonitored(ctx context.Context, inst spec.Instance, options *spec.MonitoredOptions, timeout uint64, systemdMode spec.SystemdMode) error { e := ctxt.GetInner(ctx).Get(inst.GetManageHost()) logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) logger.Infof("Destroying monitored %s", inst.GetManageHost()) logger.Infof("\tDestroying instance %s", inst.GetManageHost()) // Stop by systemd. 
delPaths := make([]string, 0) delPaths = append(delPaths, options.DataDir) delPaths = append(delPaths, options.LogDir) // In TiDB-Ansible, deploy dir are shared by all components on the same // host, so not deleting it. if !inst.IsImported() { delPaths = append(delPaths, options.DeployDir) } else { logger.Warnf("Monitored deploy dir %s not deleted for TiDB-Ansible imported instance %s.", options.DeployDir, inst.InstanceName()) } systemdDir := "/etc/systemd/system/" sudo := true if systemdMode == spec.UserMode { systemdDir = "~/.config/systemd/user/" sudo = false } delPaths = append(delPaths, fmt.Sprintf("%s%s-%d.service", systemdDir, spec.ComponentNodeExporter, options.NodeExporterPort)) delPaths = append(delPaths, fmt.Sprintf("%s%s-%d.service", systemdDir, spec.ComponentBlackboxExporter, options.BlackboxExporterPort)) c := module.ShellModuleConfig{ Command: fmt.Sprintf("rm -rf %s;", strings.Join(delPaths, " ")), Sudo: sudo, // the .service files are in a directory owned by root Chdir: "", UseShell: false, } shell := module.NewShellModule(c) stdout, stderr, err := shell.Execute(ctx, e) if len(stdout) > 0 { fmt.Println(string(stdout)) } if len(stderr) > 0 { logger.Errorf("%s", string(stderr)) } if err != nil { return perrs.Annotatef(err, "failed to destroy monitored: %s", inst.GetManageHost()) } if err := spec.PortStopped(ctx, e, options.NodeExporterPort, timeout); err != nil { str := fmt.Sprintf("%s failed to destroy node exportoer: %s", inst.GetManageHost(), err) logger.Errorf("%s", str) return perrs.Annotatef(err, "%s", str) } if err := spec.PortStopped(ctx, e, options.BlackboxExporterPort, timeout); err != nil { str := fmt.Sprintf("%s failed to destroy blackbox exportoer: %s", inst.GetManageHost(), err) logger.Errorf("%s", str) return perrs.Annotatef(err, "%s", str) } logger.Infof("Destroy monitored on %s success", inst.GetManageHost()) return nil } // CleanupComponent cleanup the instances func CleanupComponent(ctx context.Context, delFileMaps 
map[string]set.StringSet, sudo bool) error { logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) for host, delFiles := range delFileMaps { e := ctxt.GetInner(ctx).Get(host) logger.Infof("Cleanup instance %s", host) logger.Debugf("Deleting paths on %s: %s", host, strings.Join(delFiles.Slice(), " ")) c := module.ShellModuleConfig{ Command: fmt.Sprintf("rm -rf %s;", strings.Join(delFiles.Slice(), " ")), Sudo: sudo, // the .service files are in a directory owned by root Chdir: "", UseShell: true, } shell := module.NewShellModule(c) stdout, stderr, err := shell.Execute(ctx, e) if len(stdout) > 0 { fmt.Println(string(stdout)) } if len(stderr) > 0 { logger.Errorf("%s", string(stderr)) } if err != nil { return perrs.Annotatef(err, "failed to cleanup: %s", host) } logger.Infof("Cleanup %s success", host) } return nil } // DestroyComponent destroy the instances. func DestroyComponent(ctx context.Context, instances []spec.Instance, cls spec.Topology, options Options) error { if len(instances) == 0 { return nil } logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) name := instances[0].ComponentName() logger.Infof("Destroying component %s", name) retainDataRoles := set.NewStringSet(options.RetainDataRoles...) retainDataNodes := set.NewStringSet(options.RetainDataNodes...) 
for _, ins := range instances { // Some data of instances will be retained dataRetained := retainDataRoles.Exist(ins.ComponentName()) || retainDataNodes.Exist(ins.ID()) || retainDataNodes.Exist(ins.GetHost()) || retainDataRoles.Exist(ins.GetManageHost()) e := ctxt.GetInner(ctx).Get(ins.GetManageHost()) logger.Infof("\tDestroying instance %s", ins.GetManageHost()) var dataDirs []string if len(ins.DataDir()) > 0 { dataDirs = strings.Split(ins.DataDir(), ",") } deployDir := ins.DeployDir() delPaths := set.NewStringSet() // Retain the deploy directory if the users want to retain the data directory // and the data directory is a sub-directory of deploy directory keepDeployDir := false for _, dataDir := range dataDirs { // Don't delete the parent directory if any sub-directory retained keepDeployDir = (dataRetained && strings.HasPrefix(dataDir, deployDir)) || keepDeployDir if !dataRetained && cls.CountDir(ins.GetManageHost(), dataDir) == 1 { // only delete path if it is not used by any other instance in the cluster delPaths.Insert(dataDir) } } // For TiFlash, we need to delete storage.remote.cache.dir if ins.ComponentName() == spec.ComponentTiFlash { tiflashInstance := ins.(*spec.TiFlashInstance) tiflashSpec := tiflashInstance.InstanceSpec.(*spec.TiFlashSpec) if remoteCacheDir, ok := tiflashSpec.Config[spec.TiFlashRemoteCacheDir]; ok { delPaths.Insert(remoteCacheDir.(string)) } } logDir := ins.LogDir() // In TiDB-Ansible, deploy dir are shared by all components on the same // host, so not deleting it. 
if ins.IsImported() { // not deleting files for imported clusters if !strings.HasPrefix(logDir, ins.DeployDir()) && cls.CountDir(ins.GetManageHost(), logDir) == 1 { delPaths.Insert(logDir) } logger.Warnf("Deploy dir %s not deleted for TiDB-Ansible imported instance %s.", ins.DeployDir(), ins.InstanceName()) } else { if keepDeployDir { delPaths.Insert(filepath.Join(deployDir, "conf")) delPaths.Insert(filepath.Join(deployDir, "bin")) delPaths.Insert(filepath.Join(deployDir, "scripts")) if cls.BaseTopo().GlobalOptions.TLSEnabled { delPaths.Insert(filepath.Join(deployDir, spec.TLSCertKeyDir)) } // only delete path if it is not used by any other instance in the cluster if strings.HasPrefix(logDir, deployDir) && cls.CountDir(ins.GetManageHost(), logDir) == 1 { delPaths.Insert(logDir) } } else { // only delete path if it is not used by any other instance in the cluster if cls.CountDir(ins.GetManageHost(), logDir) == 1 { delPaths.Insert(logDir) } if cls.CountDir(ins.GetManageHost(), ins.DeployDir()) == 1 { delPaths.Insert(ins.DeployDir()) } } } // check for deploy dir again, to avoid unused files being left on disk dpCnt := 0 for _, dir := range delPaths.Slice() { if strings.HasPrefix(dir, deployDir+"/") { // only check subdir of deploy dir dpCnt++ } } if !ins.IsImported() && cls.CountDir(ins.GetManageHost(), deployDir)-dpCnt == 1 { delPaths.Insert(deployDir) } systemdDir := "/etc/systemd/system/" sudo := true if cls.BaseTopo().GlobalOptions.SystemdMode == spec.UserMode { systemdDir = "~/.config/systemd/user/" sudo = false } if svc := ins.ServiceName(); svc != "" { delPaths.Insert(fmt.Sprintf("%s%s", systemdDir, svc)) } logger.Debugf("Deleting paths on %s: %s", ins.GetManageHost(), strings.Join(delPaths.Slice(), " ")) for _, delPath := range delPaths.Slice() { c := module.ShellModuleConfig{ Command: fmt.Sprintf("rm -rf %s;", delPath), Sudo: sudo, // the .service files are in a directory owned by root Chdir: "", UseShell: false, } shell := module.NewShellModule(c) _, _, err 
:= shell.Execute(ctx, e) if err != nil { // Ignore error and continue.For example, deleting a mount point will result in a "Device or resource busy" error. logger.Warnf("%s", color.YellowString("Warn: failed to delete path \"%s\" on %s.Please check this error message and manually delete if necessary.\nerrmsg: %s", delPath, ins.GetManageHost(), err)) } } logger.Infof("Destroy %s finished", ins.GetManageHost()) logger.Infof("- Destroy %s paths: %v", ins.ComponentName(), delPaths.Slice()) } return nil } // DestroyTombstone remove the tombstone node in spec and destroy them. // If returNodesOnly is true, it will only return the node id that can be destroy. func DestroyTombstone( ctx context.Context, cluster *spec.Specification, returNodesOnly bool, options Options, tlsCfg *tls.Config, ) (nodes []string, err error) { return DestroyClusterTombstone(ctx, cluster, returNodesOnly, options, tlsCfg) } // DestroyClusterTombstone remove the tombstone node in spec and destroy them. // If returNodesOnly is true, it will only return the node id that can be destroy. 
func DestroyClusterTombstone(
	ctx context.Context,
	cluster *spec.Specification,
	returNodesOnly bool,
	options Options,
	tlsCfg *tls.Config,
) (nodes []string, err error) {
	logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	// count of instances per manage-host; used to decide when the last
	// instance on a host has been removed (so its monitor can be destroyed too)
	instCount := map[string]int{}
	for _, component := range cluster.ComponentsByStartOrder() {
		for _, instance := range component.Instances() {
			instCount[instance.GetManageHost()]++
		}
	}

	var pdEndpoints []string
	forcePDEndpoints := os.Getenv(EnvNamePDEndpointOverwrite) // custom set PD endpoint list

	if forcePDEndpoints != "" {
		pdEndpoints = strings.Split(forcePDEndpoints, ",")
		logger.Warnf("%s is set, using %s as PD endpoints", EnvNamePDEndpointOverwrite, pdEndpoints)
	} else {
		pdEndpoints = cluster.GetPDListWithManageHost()
	}

	var pdClient = api.NewPDClient(ctx, pdEndpoints, 10*time.Second, tlsCfg)

	// when a TCP proxy is configured, route PD traffic through it
	tcpProxy := proxy.GetTCPProxy()
	if tcpProxy != nil {
		closeC := tcpProxy.Run(pdEndpoints)
		defer tcpProxy.Close(closeC)
		pdEndpoints = tcpProxy.GetEndpoints()
	}
	binlogClient, err := api.NewBinlogClient(pdEndpoints, 5*time.Second, tlsCfg)
	if err != nil {
		return nil, err
	}

	// filterID keeps only the instances whose ID matches the given id
	filterID := func(instance []spec.Instance, id string) (res []spec.Instance) {
		for _, ins := range instance {
			if ins.ID() == id {
				res = append(res, ins)
			}
		}
		return
	}

	// maybeDestroyMonitor stops and destroys the instance matching id; the
	// monitor on its host is destroyed only when it is the host's last instance
	maybeDestroyMonitor := func(instances []spec.Instance, id string) error {
		instances = filterID(instances, id)

		for _, instance := range instances {
			instCount[instance.GetManageHost()]--
			err := StopAndDestroyInstance(ctx, cluster, instance, options, true, instCount[instance.GetManageHost()] == 0, tlsCfg)
			if err != nil {
				if !options.Force {
					return err
				}
				logger.Warnf("failed to stop and destroy instance %s (%s), ignored as --force is set, you may need to manually cleanup the files",
					instance, err)
			}
		}
		return nil
	}

	// For each store-like component: keep servers that are not offline or not
	// yet tombstone; collect tombstone node ids and (unless returNodesOnly)
	// destroy them.
	var kvServers []*spec.TiKVSpec
	for _, s := range cluster.TiKVServers {
		if !s.Offline {
			kvServers = append(kvServers, s)
			continue
		}

		id := utils.JoinHostPort(s.Host, s.Port)

		tombstone, err := pdClient.IsTombStone(id)
		if err != nil {
			return nil, err
		}

		if !tombstone {
			kvServers = append(kvServers, s)
			continue
		}

		nodes = append(nodes, id)
		if returNodesOnly {
			continue
		}

		instances := (&spec.TiKVComponent{Topology: cluster}).Instances()
		if err := maybeDestroyMonitor(instances, id); err != nil {
			return nil, err
		}
	}

	var flashServers []*spec.TiFlashSpec
	for _, s := range cluster.TiFlashServers {
		if !s.Offline {
			flashServers = append(flashServers, s)
			continue
		}

		// TiFlash registers in PD with its service port
		id := utils.JoinHostPort(s.Host, s.FlashServicePort)

		tombstone, err := pdClient.IsTombStone(id)
		if err != nil {
			return nil, err
		}

		if !tombstone {
			flashServers = append(flashServers, s)
			continue
		}

		nodes = append(nodes, id)
		if returNodesOnly {
			continue
		}

		instances := (&spec.TiFlashComponent{Topology: cluster}).Instances()
		// instance IDs use the main port, not the flash service port
		id = utils.JoinHostPort(s.Host, s.GetMainPort())
		if err := maybeDestroyMonitor(instances, id); err != nil {
			return nil, err
		}
	}

	var pumpServers []*spec.PumpSpec
	for _, s := range cluster.PumpServers {
		if !s.Offline {
			pumpServers = append(pumpServers, s)
			continue
		}

		id := utils.JoinHostPort(s.Host, s.Port)

		tombstone, err := binlogClient.IsPumpTombstone(ctx, id)
		if err != nil {
			return nil, err
		}

		if !tombstone {
			pumpServers = append(pumpServers, s)
			continue
		}

		nodes = append(nodes, id)
		if returNodesOnly {
			continue
		}

		instances := (&spec.PumpComponent{Topology: cluster}).Instances()
		if err := maybeDestroyMonitor(instances, id); err != nil {
			return nil, err
		}
	}

	var drainerServers []*spec.DrainerSpec
	for _, s := range cluster.Drainers {
		if !s.Offline {
			drainerServers = append(drainerServers, s)
			continue
		}

		id := utils.JoinHostPort(s.Host, s.Port)

		tombstone, err := binlogClient.IsDrainerTombstone(ctx, id)
		if err != nil {
			return nil, err
		}

		if !tombstone {
			drainerServers = append(drainerServers, s)
			continue
		}

		nodes = append(nodes, id)
		if returNodesOnly {
			continue
		}

		instances := (&spec.DrainerComponent{Topology: cluster}).Instances()
		if err := maybeDestroyMonitor(instances, id); err != nil {
			return nil, err
		}
	}

	if returNodesOnly {
		return
	}

	// persist the pruned server lists back into the spec
	cluster.TiKVServers = kvServers
	cluster.TiFlashServers = flashServers
	cluster.PumpServers = pumpServers
	cluster.Drainers = drainerServers

	return
}
tiup-1.16.3/pkg/cluster/operation/download.go000066400000000000000000000034311505422223000211570ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package operator

import (
	"fmt"
	"os"

	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/cluster/clusterutil"
	"github.com/pingcap/tiup/pkg/cluster/spec"
	"github.com/pingcap/tiup/pkg/utils"
)

// Download the specific version of a component from
// the repository, there is nothing to do if the specified version exists.
func Download(component, nodeOS, arch string, version string) error { if component == "" { return errors.New("component name not specified") } if version == "" { return errors.Errorf("version not specified for component '%s'", component) } resName := fmt.Sprintf("%s-%s", component, version) fileName := fmt.Sprintf("%s-%s-%s.tar.gz", resName, nodeOS, arch) targetPath := spec.ProfilePath(spec.TiUPPackageCacheDir, fileName) if err := utils.MkdirAll(spec.ProfilePath(spec.TiUPPackageCacheDir), 0755); err != nil { return err } repo, err := clusterutil.NewRepository(nodeOS, arch) if err != nil { return err } // Download from repository if not exists if utils.IsNotExist(targetPath) { if err := repo.DownloadComponent(component, version, targetPath); err != nil { return err } } else if version != "nightly" { if err := repo.VerifyComponent(component, version, targetPath); err != nil { os.Remove(targetPath) } } return nil } tiup-1.16.3/pkg/cluster/operation/operation.go000066400000000000000000000110741505422223000213520ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package operator

import (
	"fmt"
	"os"

	"github.com/pingcap/tiup/pkg/cluster/executor"
	"github.com/pingcap/tiup/pkg/cluster/spec"
	"github.com/pingcap/tiup/pkg/set"
)

// environment variable names that used to interrupt operations
const (
	EnvNameSkipScaleInTopoCheck = "SKIP_SCALEIN_TOPO_CHECK"
	EnvNamePDEndpointOverwrite  = "FORCE_PD_ENDPOINTS"
)

// Options represents the operation options
type Options struct {
	Roles []string
	Nodes []string
	Force bool // Option for upgrade/tls subcommand

	SSHTimeout uint64 // timeout in seconds when connecting an SSH server
	OptTimeout uint64 // timeout in seconds for operations that support it, not to confuse with SSH timeout
	APITimeout uint64 // timeout in seconds for API operations that support it, like transferring store leader

	IgnoreConfigCheck bool // should we ignore the config check result after init config
	NativeSSH         bool // should use native ssh client or builtin easy ssh (deprecated, should use SSHType)

	SSHType executor.SSHType // the ssh type: 'builtin', 'system', 'none'

	Concurrency int // max number of parallel tasks to run

	SSHProxyHost        string // the ssh proxy host
	SSHProxyPort        int    // the ssh proxy port
	SSHProxyUser        string // the ssh proxy user
	SSHProxyIdentity    string // the ssh proxy identity file
	SSHProxyUsePassword bool   // use password instead of identity file for ssh proxy connection
	SSHProxyTimeout     uint64 // timeout in seconds when connecting the proxy host

	SSHCustomScripts SSHCustomScripts // custom scripts to be executed during the operation

	// What type of things should we cleanup in clean command
	CleanupData     bool // should we cleanup data
	CleanupLog      bool // should we cleanup log
	CleanupAuditLog bool // should we cleanup tidb server audit log

	// Some data will be retained when destroying instances
	RetainDataRoles []string
	RetainDataNodes []string

	DisplayMode string // the output format
	Operation   Operation
}

// SSHCustomScripts represents the custom ssh script set to be executed during cluster operations
type SSHCustomScripts struct {
	BeforeRestartInstance SSHCustomScript
	AfterRestartInstance  SSHCustomScript
}

// SSHCustomScript represents a custom ssh script to be executed during cluster operations
type SSHCustomScript struct {
	Raw string
}

// Command returns the ssh command in string format.
// If Raw is a readable file path, the file's content is returned;
// otherwise Raw itself is treated as the script text.
func (s SSHCustomScript) Command() string {
	b, err := os.ReadFile(s.Raw)
	if err != nil {
		return s.Raw
	}
	return string(b)
}

// Operation represents the type of cluster operation
type Operation byte

// Operation represents the kind of cluster operation
const (
	// StartOperation Operation = iota
	// StopOperation
	RestartOperation Operation = iota
	DestroyOperation
	UpgradeOperation
	ScaleInOperation
	ScaleOutOperation
	DestroyTombstoneOperation
)

// opStringify maps Operation values to their names.
// fix: the table previously still began with the commented-out
// "StartOperation"/"StopOperation" entries, so every String() result was
// shifted by two (RestartOperation printed as "StartOperation") and the last
// two entries were unreachable; it now aligns with the const block above.
var opStringify = [...]string{
	"RestartOperation",
	"DestroyOperation",
	"UpgradeOperation",
	"ScaleInOperation",
	"ScaleOutOperation",
	"DestroyTombstoneOperation",
}

// String implements fmt.Stringer for Operation.
func (op Operation) String() string {
	if int(op) < len(opStringify) {
		return opStringify[op]
	}
	// fix: was "unknonw-op(%d)"
	return fmt.Sprintf("unknown-op(%d)", op)
}

// FilterComponent filter components by set
func FilterComponent(comps []spec.Component, components set.StringSet) (res []spec.Component) {
	if len(components) == 0 {
		res = comps // no filter set: keep every component
		return
	}

	for _, c := range comps {
		var role string
		switch c.Name() {
		case spec.ComponentTiSpark: // TiSpark is matched by role (master/worker), not by name
			role = c.Role()
		default:
			role = c.Name()
		}
		if !components.Exist(role) {
			continue
		}
		res = append(res, c)
	}
	return
}

// FilterInstance filter instances by set
func FilterInstance(instances []spec.Instance, nodes set.StringSet) (res []spec.Instance) {
	if len(nodes) == 0 {
		res = instances // no filter set: keep every instance
		return
	}

	for _, c := range instances {
		if !nodes.Exist(c.ID()) {
			continue
		}
		res = append(res, c)
	}
	return
}
tiup-1.16.3/pkg/cluster/operation/pd_member.go000066400000000000000000000046321505422223000213060ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc.
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package operator import ( "context" "crypto/tls" "fmt" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/spec" ) // UpdatePDMember is used to update pd cluster member type UpdatePDMember struct { cluster string tlsCfg *tls.Config metadata spec.Metadata enableTLS bool } // SetPDMember set the member of pd-etcd func SetPDMember(ctx context.Context, clusterName string, enable bool, tlsCfg *tls.Config, meta spec.Metadata) error { u := &UpdatePDMember{ cluster: clusterName, tlsCfg: tlsCfg, metadata: meta, enableTLS: enable, } return u.Execute(ctx) } // Execute implements the Task interface func (u *UpdatePDMember) Execute(ctx context.Context) error { // connection etcd etcdClient, err := u.metadata.GetTopology().(*spec.Specification).GetEtcdClient(u.tlsCfg) if err != nil { return err } // etcd client defaults to wait forever // if all pd were down, don't hang forever ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() // txn := etcdClient.Txn(ctx) memberList, err := etcdClient.MemberList(ctx) if err != nil { return err } for _, member := range memberList.Members { _, err := etcdClient.MemberUpdate(ctx, member.GetID(), u.updatePeerURLs(member.PeerURLs)) if err != nil { return err } } // get member list after update memberList, err = etcdClient.MemberList(ctx) if err != nil { return err } for _, member := range memberList.Members { fmt.Printf("\tUpdate %s peerURLs: %v\n", member.Name, member.PeerURLs) } return nil } // updatePeerURLs http->https or https->http func (u 
*UpdatePDMember) updatePeerURLs(peerURLs []string) []string { newPeerURLs := []string{} for _, url := range peerURLs { if u.enableTLS { newPeerURLs = append(newPeerURLs, strings.Replace(url, "http://", "https://", 1)) } else { newPeerURLs = append(newPeerURLs, strings.Replace(url, "https://", "http://", 1)) } } return newPeerURLs } tiup-1.16.3/pkg/cluster/operation/scale_in.go000066400000000000000000000346371505422223000211410ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package operator import ( "context" "crypto/tls" "encoding/json" "os" "strings" "time" "slices" "github.com/fatih/color" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/sync/errgroup" ) // TODO: We can make drainer not async. var asyncOfflineComps = set.NewStringSet(spec.ComponentPump, spec.ComponentTiKV, spec.ComponentTiFlash, spec.ComponentDrainer) // AsyncNodes return all nodes async destroy or not. 
func AsyncNodes(spec *spec.Specification, nodes []string, async bool) []string { var asyncNodes []string var notAsyncNodes []string inNodes := func(n string) bool { return slices.Contains(nodes, n) } for _, c := range spec.ComponentsByStartOrder() { for _, ins := range c.Instances() { if !inNodes(ins.ID()) { continue } if asyncOfflineComps.Exist(ins.ComponentName()) { asyncNodes = append(asyncNodes, ins.ID()) } else { notAsyncNodes = append(notAsyncNodes, ins.ID()) } } } if async { return asyncNodes } return notAsyncNodes } // ScaleIn scales in the cluster func ScaleIn( ctx context.Context, cluster *spec.Specification, options Options, tlsCfg *tls.Config, ) error { return ScaleInCluster(ctx, cluster, options, tlsCfg) } // ScaleInCluster scales in the cluster // //revive:disable func ScaleInCluster( ctx context.Context, cluster *spec.Specification, options Options, tlsCfg *tls.Config, ) error { logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) // instances by uuid instances := map[string]spec.Instance{} instCount := map[string]int{} // make sure all nodeIds exists in topology for _, component := range cluster.ComponentsByStartOrder() { for _, instance := range component.Instances() { instances[instance.ID()] = instance instCount[instance.GetManageHost()]++ } } // Clean components deletedDiff := map[string][]spec.Instance{} deletedNodes := set.NewStringSet(options.Nodes...) 
for nodeID := range deletedNodes { inst, found := instances[nodeID] if !found { return errors.Errorf("cannot find node id '%s' in topology", nodeID) } deletedDiff[inst.ComponentName()] = append(deletedDiff[inst.ComponentName()], inst) } skipTopoCheck := false if v := os.Getenv(EnvNameSkipScaleInTopoCheck); v != "" { // any value except empty will work as "true" skipTopoCheck = true } if skipTopoCheck { logger.Warnf("%s is set, topology checks ignored, the cluster might be broken after the operations!", EnvNameSkipScaleInTopoCheck) if ok, input := tui.PromptForConfirmYes("Are you sure to continue? [y/N]"); !ok { return errors.Errorf("user aborted with '%s'", input) } } else { // Cannot delete all PD servers if len(deletedDiff[spec.ComponentPD]) == len(cluster.PDServers) { return errors.New("cannot delete all PD servers") } // Cannot delete all TiKV servers if len(deletedDiff[spec.ComponentTiKV]) == len(cluster.TiKVServers) { return errors.New("cannot delete all TiKV servers") } // Cannot delete TiSpark master server if there's any TiSpark worker remains if len(deletedDiff[spec.ComponentTiSpark]) > 0 { var cntDiffTiSparkMaster int var cntDiffTiSparkWorker int for _, inst := range deletedDiff[spec.ComponentTiSpark] { switch inst.Role() { case spec.RoleTiSparkMaster: cntDiffTiSparkMaster++ case spec.RoleTiSparkWorker: cntDiffTiSparkWorker++ } } if cntDiffTiSparkMaster == len(cluster.TiSparkMasters) && cntDiffTiSparkWorker < len(cluster.TiSparkWorkers) { return errors.New("cannot delete tispark master when there are workers left") } } } var pdEndpoints []string forcePDEndpoints := os.Getenv(EnvNamePDEndpointOverwrite) // custom set PD endpoint list if forcePDEndpoints != "" { pdEndpoints = strings.Split(forcePDEndpoints, ",") logger.Warnf("%s is set, using %s as PD endpoints", EnvNamePDEndpointOverwrite, pdEndpoints) } else { for _, instance := range (&spec.PDComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { pdEndpoints = 
append(pdEndpoints, Addr(instance)) } } } // At least a PD server exists if len(pdEndpoints) == 0 { return errors.New("cannot find available PD instance") } pdClient := api.NewPDClient(ctx, pdEndpoints, 10*time.Second, tlsCfg) tcpProxy := proxy.GetTCPProxy() if tcpProxy != nil { closeC := tcpProxy.Run(pdEndpoints) defer tcpProxy.Close(closeC) pdEndpoints = tcpProxy.GetEndpoints() } binlogClient, err := api.NewBinlogClient(pdEndpoints, 5*time.Second, tlsCfg) if err != nil { return err } if options.Force { for _, component := range cluster.ComponentsByStartOrder() { for _, instance := range component.Instances() { if !deletedNodes.Exist(instance.ID()) { continue } compName := component.Name() if compName != spec.ComponentPump && compName != spec.ComponentDrainer { if err := deleteMember(ctx, component, instance, pdClient, binlogClient, options.APITimeout); err != nil { logger.Warnf("failed to delete %s: %v", compName, err) } } instCount[instance.GetManageHost()]-- if err := StopAndDestroyInstance(ctx, cluster, instance, options, true, instCount[instance.GetManageHost()] == 0, tlsCfg); err != nil { logger.Warnf("failed to stop/destroy %s: %v", compName, err) } // directly update pump&drainer 's state as offline in etcd. if binlogClient != nil { id := instance.ID() if compName == spec.ComponentPump { if err := binlogClient.UpdatePumpState(ctx, id, "offline"); err != nil { logger.Warnf("failed to update %s state as offline: %v", compName, err) } } else if compName == spec.ComponentDrainer { if err := binlogClient.UpdateDrainerState(ctx, id, "offline"); err != nil { logger.Warnf("failed to update %s state as offline: %v", compName, err) } } } } } return nil } // TODO if binlog is switch on, cannot delete all pump servers. 
var tiflashInstances []spec.Instance for _, instance := range (&spec.TiFlashComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { tiflashInstances = append(tiflashInstances, instance) } } if len(tiflashInstances) > 0 { var tikvInstances []spec.Instance for _, instance := range (&spec.TiKVComponent{Topology: cluster}).Instances() { if !deletedNodes.Exist(instance.ID()) { tikvInstances = append(tikvInstances, instance) } } type replicateConfig struct { MaxReplicas int `json:"max-replicas"` } var config replicateConfig bytes, err := pdClient.GetReplicateConfig() if err != nil { return err } if err := json.Unmarshal(bytes, &config); err != nil { return err } maxReplicas := config.MaxReplicas if len(tikvInstances) < maxReplicas { logger.Warnf("TiKV instance number %d will be less than max-replicas setting after scale-in. TiFlash won't be able to receive data from leader before TiKV instance number reach %d", len(tikvInstances), maxReplicas) } } cdcInstances := make([]spec.Instance, 0) // Delete member from cluster for _, component := range cluster.ComponentsByStartOrder() { deferInstances := make([]spec.Instance, 0) for _, instance := range component.Instances() { if !deletedNodes.Exist(instance.ID()) { continue } // skip cdc at the moment, handle them separately. 
if component.Role() == spec.ComponentCDC { cdcInstances = append(cdcInstances, instance) continue } if component.Role() == spec.ComponentPD { // defer PD leader to be scale-in after others isLeader, err := instance.(*spec.PDInstance).IsLeader(ctx, cluster, int(options.APITimeout), tlsCfg) if err != nil { logger.Warnf("cannot found pd leader, ignore: %s", err) return err } if isLeader { deferInstances = append(deferInstances, instance) logger.Debugf("Deferred scale-in of PD leader %s", instance.ID()) continue } } err := deleteMember(ctx, component, instance, pdClient, binlogClient, options.APITimeout) if err != nil { return errors.Trace(err) } if !asyncOfflineComps.Exist(instance.ComponentName()) { instCount[instance.GetManageHost()]-- if err := StopAndDestroyInstance(ctx, cluster, instance, options, false, instCount[instance.GetManageHost()] == 0, tlsCfg); err != nil { return err } } else { logger.Warnf("%s", color.YellowString("The component `%s` will become tombstone, maybe exists in several minutes or hours, after that you can use the prune command to clean it", component.Name())) } } // process deferred instances for _, instance := range deferInstances { // actually, it must be the pd leader at the moment, so the `PreRestart` always triggered. 
rollingInstance, ok := instance.(spec.RollingUpdateInstance) if ok { if err := rollingInstance.PreRestart(ctx, cluster, int(options.APITimeout), tlsCfg, nil); err != nil { return errors.Trace(err) } } err := deleteMember(ctx, component, instance, pdClient, binlogClient, options.APITimeout) if err != nil { return errors.Trace(err) } if !asyncOfflineComps.Exist(instance.ComponentName()) { instCount[instance.GetManageHost()]-- if err := StopAndDestroyInstance(ctx, cluster, instance, options, false, instCount[instance.GetManageHost()] == 0, tlsCfg); err != nil { return err } } else { logger.Warnf("%s", color.YellowString("The component `%s` will become tombstone, maybe exists in several minutes or hours, after that you can use the prune command to clean it", component.Name())) } } } if len(cdcInstances) != 0 { err := scaleInCDC(ctx, cluster, cdcInstances, tlsCfg, options, instCount) if err != nil { return errors.Trace(err) } } for i := 0; i < len(cluster.TiKVServers); i++ { s := cluster.TiKVServers[i] id := utils.JoinHostPort(s.Host, s.Port) if !deletedNodes.Exist(id) { continue } s.Offline = true cluster.TiKVServers[i] = s } for i := 0; i < len(cluster.TiFlashServers); i++ { s := cluster.TiFlashServers[i] id := utils.JoinHostPort(s.Host, s.TCPPort) if !deletedNodes.Exist(id) { continue } s.Offline = true cluster.TiFlashServers[i] = s } for i := 0; i < len(cluster.PumpServers); i++ { s := cluster.PumpServers[i] id := utils.JoinHostPort(s.Host, s.Port) if !deletedNodes.Exist(id) { continue } s.Offline = true cluster.PumpServers[i] = s } for i := 0; i < len(cluster.Drainers); i++ { s := cluster.Drainers[i] id := utils.JoinHostPort(s.Host, s.Port) if !deletedNodes.Exist(id) { continue } s.Offline = true cluster.Drainers[i] = s } return nil } func deleteMember( ctx context.Context, component spec.Component, instance spec.Instance, pdClient *api.PDClient, binlogClient *api.BinlogClient, timeoutSecond uint64, ) error { timeoutOpt := &utils.RetryOption{ Timeout: time.Second * 
time.Duration(timeoutSecond), Delay: time.Second * 5, } switch component.Name() { case spec.ComponentTiKV: if err := pdClient.DelStore(instance.ID(), timeoutOpt); err != nil { return err } case spec.ComponentTiFlash: addr := utils.JoinHostPort(instance.GetHost(), instance.(*spec.TiFlashInstance).GetServicePort()) if err := pdClient.DelStore(addr, timeoutOpt); err != nil { return err } case spec.ComponentPD: if err := pdClient.DelPD(instance.(*spec.PDInstance).Name, timeoutOpt); err != nil { return err } case spec.ComponentDrainer: addr := utils.JoinHostPort(instance.GetHost(), instance.GetPort()) err := binlogClient.OfflineDrainer(ctx, addr) if err != nil { return err } case spec.ComponentPump: addr := utils.JoinHostPort(instance.GetHost(), instance.GetPort()) err := binlogClient.OfflinePump(ctx, addr) if err != nil { return err } } return nil } func scaleInCDC( ctx context.Context, cluster *spec.Specification, instances []spec.Instance, tlsCfg *tls.Config, options Options, instCount map[string]int, ) error { logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) // if all cdc instances are selected, just stop all instances by force if len(instances) == len(cluster.CDCServers) { g, _ := errgroup.WithContext(ctx) for _, ins := range instances { instCount[ins.GetManageHost()]++ destroyNode := instCount[ins.GetManageHost()] == 0 g.Go(func() error { return StopAndDestroyInstance(ctx, cluster, ins, options, true, destroyNode, tlsCfg) }) } return g.Wait() } deferInstances := make([]spec.Instance, 0, 1) for _, instance := range instances { address := instance.(*spec.CDCInstance).GetAddr() client := api.NewCDCOpenAPIClient(ctx, []string{utils.JoinHostPort(instance.GetManageHost(), instance.GetPort())}, 5*time.Second, tlsCfg) capture, err := client.GetCaptureByAddr(address) if err != nil { // this may be caused by that the instance is not running, or the specified version of cdc does not support open api logger.Debugf("scale-in cdc, get capture by address 
failed, stop the instance by force, "+ "addr: %s, err: %+v", address, err) instCount[instance.GetManageHost()]-- if err := StopAndDestroyInstance(ctx, cluster, instance, options, true, instCount[instance.GetManageHost()] == 0, tlsCfg); err != nil { return err } continue } if capture.IsOwner { deferInstances = append(deferInstances, instance) logger.Debugf("Deferred scale-in the TiCDC owner %s", instance.ID()) continue } instCount[instance.GetManageHost()]-- if err := StopAndDestroyInstance(ctx, cluster, instance, options, false, instCount[instance.GetManageHost()] == 0, tlsCfg); err != nil { return err } } for _, instance := range deferInstances { instCount[instance.GetManageHost()]-- if err := StopAndDestroyInstance(ctx, cluster, instance, options, false, instCount[instance.GetManageHost()] == 0, tlsCfg); err != nil { return err } } return nil } tiup-1.16.3/pkg/cluster/operation/systemd.go000066400000000000000000000056751505422223000210540ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package operator import ( "context" "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/module" "go.uber.org/zap" ) // GetServiceStatus return the Acitive line of status. 
/* [tidb@ip-172-16-5-70 deploy]$ sudo systemctl status drainer-8249.service ● drainer-8249.service - drainer-8249 service Loaded: loaded (/etc/systemd/system/drainer-8249.service; disabled; vendor preset: disabled) Active: active (running) since Mon 2020-03-09 13:56:19 CST; 1 weeks 3 days ago Main PID: 36718 (drainer) CGroup: /system.slice/drainer-8249.service └─36718 bin/drainer --addr=172.16.5.70:8249 --pd-urls=http://172.16.5.70:2379 --data-dir=/data1/deploy/data.drainer --log-file=/data1/deploy/log/drainer.log --config=conf/drainer.toml --initial-commit-ts=408375872006389761 Mar 09 13:56:19 ip-172-16-5-70 systemd[1]: Started drainer-8249 service. */ func GetServiceStatus(ctx context.Context, e ctxt.Executor, name string, scope string, systemdMode string) (active, memory string, since time.Duration, err error) { c := module.SystemdModuleConfig{ Unit: name, Action: "status", Scope: scope, SystemdMode: systemdMode, } systemd := module.NewSystemdModule(c) // ignore error since stopped service returns exit code 3 stdout, _, _ := systemd.Execute(ctx, e) lines := strings.SplitSeq(string(stdout), "\n") for line := range lines { words := strings.Split(strings.TrimSpace(line), " ") if len(words) >= 2 { switch words[0] { case "Active:": active = words[1] since = parseSystemctlSince(line) case "Memory:": memory = words[1] } } } if active == "" { err = errors.Errorf("unexpected output: %s", string(stdout)) } return } // `systemctl status xxx.service` returns as below // Active: active (running) since Sat 2021-03-27 10:51:11 CST; 41min ago func parseSystemctlSince(str string) (dur time.Duration) { // if service is not found or other error, don't need to parse it if str == "" { return 0 } defer func() { if dur == 0 { zap.L().Warn("failed to parse systemctl since", zap.String("value", str)) } }() parts := strings.Split(str, ";") if len(parts) != 2 { return } parts = strings.Split(parts[0], " ") if len(parts) < 3 { return } dateStr := strings.Join(parts[len(parts)-3:], " ") tm, 
err := time.Parse("2006-01-02 15:04:05 MST", dateStr) if err != nil { return } return time.Since(tm) } tiup-1.16.3/pkg/cluster/operation/upgrade.go000066400000000000000000000320651505422223000210040ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package operator import ( "context" "crypto/tls" "fmt" "os" "reflect" "strings" "time" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" ) var ( // register checkpoint for upgrade operation upgradePoint = checkpoint.Register(checkpoint.Field("instance", reflect.DeepEqual)) increaseLimitPoint = checkpoint.Register() ) // UpgradeWaitFunc is the function that is called after an instance has been upgraded type UpgradeWaitFunc func() // Upgrade the cluster. (actually, it's rolling restart) func Upgrade( ctx context.Context, topo spec.Topology, options Options, tlsCfg *tls.Config, currentVersion string, targetVersion string, waitFunc UpgradeWaitFunc, ) error { roleFilter := set.NewStringSet(options.Roles...) nodeFilter := set.NewStringSet(options.Nodes...) 
components := topo.ComponentsByUpdateOrder(currentVersion) components = FilterComponent(components, roleFilter) logger := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) systemdMode := string(topo.BaseTopo().GlobalOptions.SystemdMode) noAgentHosts := set.NewStringSet() uniqueHosts := set.NewStringSet() updcfg := &spec.UpdateConfig{ CurrentVersion: currentVersion, TargetVersion: targetVersion, } var cdcOpenAPIClient *api.CDCOpenAPIClient // client for cdc openapi, only used when upgrade cdc for _, component := range components { instances := FilterInstance(component.Instances(), nodeFilter) if len(instances) < 1 { continue } logger.Infof("Upgrading component %s", component.Name()) // perform pre-upgrade actions of component var origLeaderScheduleLimit int var origRegionScheduleLimit int var err error var tidbClient *api.TiDBClient var pdEndpoints []string forcePDEndpoints := os.Getenv(EnvNamePDEndpointOverwrite) // custom set PD endpoint list switch component.Name() { case spec.ComponentTiKV: if forcePDEndpoints != "" { pdEndpoints = strings.Split(forcePDEndpoints, ",") logger.Warnf("%s is set, using %s as PD endpoints", EnvNamePDEndpointOverwrite, pdEndpoints) } else { pdEndpoints = topo.(*spec.Specification).GetPDListWithManageHost() } pdClient := api.NewPDClient(ctx, pdEndpoints, 10*time.Second, tlsCfg) origLeaderScheduleLimit, origRegionScheduleLimit, err = increaseScheduleLimit(ctx, pdClient) if err != nil { // the config modifying error should be able to be safely ignored, as it will // be processed with current settings anyway. 
logger.Warnf("failed increasing schedule limit: %s, ignore", err) } else { defer func() { upgErr := decreaseScheduleLimit(pdClient, origLeaderScheduleLimit, origRegionScheduleLimit) if upgErr != nil { logger.Warnf( "failed decreasing schedule limit (original values should be: %s, %s), please check if their current values are reasonable: %s", fmt.Sprintf("leader-schedule-limit=%d", origLeaderScheduleLimit), fmt.Sprintf("region-schedule-limit=%d", origRegionScheduleLimit), upgErr, ) } }() } case spec.ComponentTiDB: dbs := topo.(*spec.Specification).TiDBServers endpoints := []string{} for _, db := range dbs { endpoints = append(endpoints, utils.JoinHostPort(db.GetManageHost(), db.StatusPort)) } if currentVersion != targetVersion && tidbver.TiDBSupportUpgradeAPI(currentVersion) && tidbver.TiDBSupportUpgradeAPI(targetVersion) { tidbClient = api.NewTiDBClient(ctx, endpoints, 10*time.Second, tlsCfg) err = tidbClient.StartUpgrade() if err != nil { return err } } default: // do nothing, kept for future usage with other components } // some instances are upgraded after others deferInstances := make([]spec.Instance, 0) for _, instance := range instances { // monitors uniqueHosts.Insert(instance.GetManageHost()) if instance.IgnoreMonitorAgent() { noAgentHosts.Insert(instance.GetManageHost()) } // Usage within the switch statement switch component.Name() { case spec.ComponentPD, spec.ComponentTSO, spec.ComponentScheduling: // defer PD related leader/primary to be upgraded after others isLeader, err := checkAndDeferPDLeader(ctx, topo, int(options.APITimeout), tlsCfg, instance) if err != nil { logger.Warnf("cannot found pd related leader/primary, ignore: %s, instance: %s", err, instance.ID()) return err } if isLeader { deferInstances = append(deferInstances, instance) logger.Debugf("Upgrading deferred instance %s...", instance.ID()) continue } case spec.ComponentCDC: ins := instance.(*spec.CDCInstance) address := ins.GetAddr() if 
!tidbver.TiCDCSupportRollingUpgrade(currentVersion) { logger.Debugf("rolling upgrade cdc not supported, upgrade by force, "+ "addr: %s, version: %s", address, currentVersion) options.Force = true if err := upgradeInstance(ctx, topo, instance, options, tlsCfg, updcfg); err != nil { options.Force = false return err } options.Force = false continue } // during the upgrade process, endpoint addresses should not change, so only new the client once. if cdcOpenAPIClient == nil { cdcOpenAPIClient = api.NewCDCOpenAPIClient(ctx, topo.(*spec.Specification).GetCDCListWithManageHost(), 5*time.Second, tlsCfg) } capture, err := cdcOpenAPIClient.GetCaptureByAddr(address) if err != nil { // After the previous status check, we know that the cdc instance should be `Up`, but know it cannot be found by address // perhaps since the specified version of cdc does not support open api, or the instance just crashed right away logger.Debugf("upgrade cdc, cannot found the capture by address: %s", address) if err := upgradeInstance(ctx, topo, instance, options, tlsCfg, updcfg); err != nil { return err } continue } if capture.IsOwner { deferInstances = append(deferInstances, instance) logger.Debugf("Deferred upgrading of TiCDC owner %s, captureID: %s, addr: %s", instance.ID(), capture.ID, address) continue } default: // do nothing, kept for future usage with other components } if err := upgradeInstance(ctx, topo, instance, options, tlsCfg, updcfg); err != nil { return err } if waitFunc != nil { waitFunc() } } // process deferred instances for _, instance := range deferInstances { logger.Debugf("Upgrading deferred instance %s...", instance.ID()) if err := upgradeInstance(ctx, topo, instance, options, tlsCfg, updcfg); err != nil { return err } } switch component.Name() { case spec.ComponentTiDB: if currentVersion != targetVersion && tidbver.TiDBSupportUpgradeAPI(currentVersion) && tidbver.TiDBSupportUpgradeAPI(targetVersion) { err = tidbClient.FinishUpgrade() if err != nil { return err } } 
default: // do nothing, kept for future usage with other components } } if topo.GetMonitoredOptions() == nil { return nil } return RestartMonitored(ctx, uniqueHosts.Slice(), noAgentHosts, topo.GetMonitoredOptions(), options.OptTimeout, systemdMode) } // checkAndDeferPDLeader checks the PD related leader/primary instance's status and defers its upgrade if necessary. func checkAndDeferPDLeader(ctx context.Context, topo spec.Topology, apiTimeout int, tlsCfg *tls.Config, instance spec.Instance) (isLeader bool, err error) { switch instance.ComponentName() { case spec.ComponentPD: isLeader, err = instance.(*spec.PDInstance).IsLeader(ctx, topo, apiTimeout, tlsCfg) case spec.ComponentScheduling: isLeader, err = instance.(*spec.SchedulingInstance).IsPrimary(ctx, topo, tlsCfg) case spec.ComponentTSO: isLeader, err = instance.(*spec.TSOInstance).IsPrimary(ctx, topo, tlsCfg) } if err != nil { return false, err } return isLeader, nil } func upgradeInstance( ctx context.Context, topo spec.Topology, instance spec.Instance, options Options, tlsCfg *tls.Config, updcfg *spec.UpdateConfig, ) (err error) { // insert checkpoint point := checkpoint.Acquire(ctx, upgradePoint, map[string]any{"instance": instance.ID()}) defer func() { point.Release(err, zap.String("instance", instance.ID())) }() if point.Hit() != nil { return nil } var rollingInstance spec.RollingUpdateInstance var isRollingInstance bool if !options.Force { rollingInstance, isRollingInstance = instance.(spec.RollingUpdateInstance) } err = executeSSHCommand(ctx, "Executing pre-upgrade command", instance.GetManageHost(), fmt.Sprintf(`export NODE="%s";export ROLE="%s";%s`, instance.ID(), instance.Role(), options.SSHCustomScripts.BeforeRestartInstance.Command()), ) if err != nil { return err } if isRollingInstance { err := rollingInstance.PreRestart(ctx, topo, int(options.APITimeout), tlsCfg, updcfg) if err != nil && !options.Force { return err } } systemdMode := string(topo.BaseTopo().GlobalOptions.SystemdMode) if err := 
restartInstance(ctx, instance, options.OptTimeout, tlsCfg, systemdMode); err != nil && !options.Force { return err } if isRollingInstance { err := rollingInstance.PostRestart(ctx, topo, tlsCfg, updcfg) if err != nil && !options.Force { return err } } err = executeSSHCommand(ctx, "Executing post-upgrade command", instance.GetManageHost(), fmt.Sprintf(`export NODE="%s";export ROLE="%s"; %s`, instance.ID(), instance.Role(), options.SSHCustomScripts.AfterRestartInstance.Command()), ) if err != nil { return err } return nil } // Addr returns the address of the instance. func Addr(ins spec.Instance) string { if ins.GetPort() == 0 || ins.GetPort() == 80 { panic(ins) } return utils.JoinHostPort(ins.GetManageHost(), ins.GetPort()) } var ( leaderScheduleLimitOffset = 32 regionScheduleLimitOffset = 512 // storeLimitOffset = 512 leaderScheduleLimitThreshold = 64 regionScheduleLimitThreshold = 1024 // storeLimitThreshold = 1024 ) // increaseScheduleLimit increases the schedule limit of leader and region for faster // rebalancing during the rolling restart / upgrade process func increaseScheduleLimit(ctx context.Context, pc *api.PDClient) ( currLeaderScheduleLimit int, currRegionScheduleLimit int, err error) { // insert checkpoint point := checkpoint.Acquire(ctx, increaseLimitPoint, map[string]any{}) defer func() { point.Release(err, zap.Int("currLeaderScheduleLimit", currLeaderScheduleLimit), zap.Int("currRegionScheduleLimit", currRegionScheduleLimit), ) }() if data := point.Hit(); data != nil { currLeaderScheduleLimit = int(data["currLeaderScheduleLimit"].(float64)) currRegionScheduleLimit = int(data["currRegionScheduleLimit"].(float64)) return } // query current values cfg, err := pc.GetConfig() if err != nil { return } val, ok := cfg["schedule.leader-schedule-limit"].(float64) if !ok { return currLeaderScheduleLimit, currRegionScheduleLimit, perrs.New("cannot get current leader-schedule-limit") } currLeaderScheduleLimit = int(val) val, ok = 
cfg["schedule.region-schedule-limit"].(float64) if !ok { return currLeaderScheduleLimit, currRegionScheduleLimit, perrs.New("cannot get current region-schedule-limit") } currRegionScheduleLimit = int(val) // increase values if currLeaderScheduleLimit < leaderScheduleLimitThreshold { newLimit := min(currLeaderScheduleLimit+leaderScheduleLimitOffset, leaderScheduleLimitThreshold) if err := pc.SetReplicationConfig("leader-schedule-limit", newLimit); err != nil { return currLeaderScheduleLimit, currRegionScheduleLimit, err } } if currRegionScheduleLimit < regionScheduleLimitThreshold { newLimit := min(currRegionScheduleLimit+regionScheduleLimitOffset, regionScheduleLimitThreshold) if err := pc.SetReplicationConfig("region-schedule-limit", newLimit); err != nil { // try to revert leader scheduler limit by our best effort, does not make sense // to handle this error again _ = pc.SetReplicationConfig("leader-schedule-limit", currLeaderScheduleLimit) return currLeaderScheduleLimit, currRegionScheduleLimit, err } } return } // decreaseScheduleLimit tries to set the schedule limit back to it's original with // the same offset value as increaseScheduleLimit added, with some sanity checks func decreaseScheduleLimit(pc *api.PDClient, origLeaderScheduleLimit, origRegionScheduleLimit int) error { if err := pc.SetReplicationConfig("leader-schedule-limit", origLeaderScheduleLimit); err != nil { return err } return pc.SetReplicationConfig("region-schedule-limit", origRegionScheduleLimit) } tiup-1.16.3/pkg/cluster/spec/000077500000000000000000000000001505422223000157525ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/spec/alertmanager.go000066400000000000000000000206641505422223000207530ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "fmt" "path/filepath" "time" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/config" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" ) // AlertmanagerSpec represents the AlertManager topology specification in topology.yaml type AlertmanagerSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` WebPort int `yaml:"web_port" default:"9093"` ClusterPort int `yaml:"cluster_port" default:"9094"` ListenHost string `yaml:"listen_host,omitempty" validate:"listen_host:editable"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` // This field allows users to define additional arguments for customization. 
AdditionalArgs []string `yaml:"additional_args,omitempty" validate:"additional_args:ignore"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` ConfigFilePath string `yaml:"config_file,omitempty" validate:"config_file:editable"` } // Role returns the component role of the instance func (s *AlertmanagerSpec) Role() string { return ComponentAlertmanager } // SSH returns the host and SSH port of the instance func (s *AlertmanagerSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *AlertmanagerSpec) GetMainPort() int { return s.WebPort } // GetManageHost returns the manage host of the instance func (s *AlertmanagerSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *AlertmanagerSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *AlertmanagerSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // AlertManagerComponent represents Alertmanager component. type AlertManagerComponent struct{ Topology } // Name implements Component interface. func (c *AlertManagerComponent) Name() string { return ComponentAlertmanager } // Role implements Component interface. func (c *AlertManagerComponent) Role() string { return RoleMonitor } // Source implements Component interface. func (c *AlertManagerComponent) Source() string { return ComponentAlertmanager } // CalculateVersion implements the Component interface func (c *AlertManagerComponent) CalculateVersion(_ string) string { // always not follow cluster version, use ""(latest) by default version := c.Topology.BaseTopo().AlertManagerVersion if version != nil { return *version } return "" } // SetVersion implements Component interface. 
func (c *AlertManagerComponent) SetVersion(version string) { *c.Topology.BaseTopo().AlertManagerVersion = version } // Instances implements Component interface. func (c *AlertManagerComponent) Instances() []Instance { alertmanagers := c.Topology.BaseTopo().Alertmanagers ins := make([]Instance, 0, len(alertmanagers)) for _, s := range alertmanagers { ins = append(ins, &AlertManagerInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: utils.Ternary(s.ListenHost != "", s.ListenHost, c.Topology.BaseTopo().GlobalOptions.ListenHost).(string), Port: s.WebPort, SSHP: s.SSHPort, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.WebPort, s.ClusterPort, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: func(_ context.Context, timeout time.Duration, _ *tls.Config, _ ...string) string { return statusByHost(s.GetManageHost(), s.WebPort, "/-/ready", timeout, nil) }, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.WebPort, timeout, tlsCfg) }, Component: c, }, topo: c.Topology, }) } return ins } // AlertManagerInstance represent the alert manager instance type AlertManagerInstance struct { BaseInstance topo Topology } // InitConfig implement Instance interface func (i *AlertManagerInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { gOpts := *i.topo.BaseTopo().GlobalOptions if err := i.BaseInstance.InitConfig(ctx, e, gOpts, deployUser, paths); err != nil { return err } // Transfer start script spec := i.InstanceSpec.(*AlertmanagerSpec) peers := []string{} for _, amspec := range i.topo.BaseTopo().Alertmanagers { peers = append(peers, utils.JoinHostPort(amspec.Host, amspec.ClusterPort)) } cfg := &scripts.AlertManagerScript{ WebListenAddr: utils.JoinHostPort(i.GetListenHost(), spec.WebPort), WebExternalURL: fmt.Sprintf("http://%s", 
utils.JoinHostPort(spec.Host, spec.WebPort)), ClusterPeers: peers, // ClusterListenAddr cannot use i.GetListenHost due to https://github.com/prometheus/alertmanager/issues/2284 and https://github.com/prometheus/alertmanager/issues/1271 ClusterListenAddr: utils.JoinHostPort(i.GetHost(), spec.ClusterPort), DeployDir: paths.Deploy, LogDir: paths.Log, DataDir: paths.Data[0], NumaNode: spec.NumaNode, // This field allows users to define additional arguments for customization. AdditionalArgs: spec.AdditionalArgs, } // doesn't work if _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil { return err } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_alertmanager_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_alertmanager.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } // transfer config dst = filepath.Join(paths.Deploy, "conf", "alertmanager.yml") if spec.ConfigFilePath != "" { return i.TransferLocalConfigFile(ctx, e, spec.ConfigFilePath, dst) } configPath := filepath.Join(paths.Cache, fmt.Sprintf("alertmanager_%s.yml", i.GetHost())) if err := config.NewAlertManagerConfig().ConfigToFile(configPath); err != nil { return err } if err := i.TransferLocalConfigFile(ctx, e, configPath, dst); err != nil { return err } // version is not used for alertmanager return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), "", i.OS(), i.Arch(), i.ComponentName()+".yml", paths) } // ScaleConfig deploy temporary config on scaling func (i *AlertManagerInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName string, clusterVersion string, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = topo return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } // 
setTLSConfig set TLS Config to support enable/disable TLS func (i *AlertManagerInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } tiup-1.16.3/pkg/cluster/spec/bindversion.go000066400000000000000000000014461505422223000206300ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "fmt" "strings" ) // ComponentSubDir maps a component with version to a subdir if needed func ComponentSubDir(comp, version string) string { if comp == ComponentSpark { return fmt.Sprintf("spark-%s-bin-hadoop2.7", strings.TrimLeft(version, "v")) } return "" } tiup-1.16.3/pkg/cluster/spec/cdc.go000066400000000000000000000274621505422223000170450ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec import ( "context" "crypto/tls" "fmt" "path/filepath" "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/scripts" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) // CDCSpec represents the CDC topology specification in topology.yaml type CDCSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` Port int `yaml:"port" default:"8300"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` Offline bool `yaml:"offline,omitempty"` GCTTL int64 `yaml:"gc-ttl,omitempty" validate:"gc-ttl:editable"` TZ string `yaml:"tz,omitempty" validate:"tz:editable"` TiCDCClusterID string `yaml:"ticdc_cluster_id"` Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` } // Role returns the component role of the instance func (s *CDCSpec) Role() string { return ComponentCDC } // SSH returns the host and SSH port of the instance func (s *CDCSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *CDCSpec) GetMainPort() int { return s.Port } // 
GetManageHost returns the manage host of the instance func (s *CDCSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *CDCSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *CDCSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // CDCComponent represents CDC component. type CDCComponent struct{ Topology *Specification } // Name implements Component interface. func (c *CDCComponent) Name() string { return ComponentCDC } // Role implements Component interface. func (c *CDCComponent) Role() string { return ComponentCDC } // Source implements Component interface. func (c *CDCComponent) Source() string { source := c.Topology.ComponentSources.CDC if source != "" { return source } return ComponentCDC } // CalculateVersion implements the Component interface func (c *CDCComponent) CalculateVersion(clusterVersion string) string { version := c.Topology.ComponentVersions.CDC if version == "" { version = clusterVersion } return version } // SetVersion implements Component interface. func (c *CDCComponent) SetVersion(version string) { c.Topology.ComponentVersions.CDC = version } // Instances implements Component interface. 
func (c *CDCComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.CDCServers)) for _, s := range c.Topology.CDCServers { instance := &CDCInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.Port, }, Dirs: []string{ s.DeployDir, }, StatusFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string { return statusByHost(s.GetManageHost(), s.Port, "/status", timeout, tlsCfg) }, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, c.Topology} if s.DataDir != "" { instance.Dirs = append(instance.Dirs, s.DataDir) } ins = append(ins, instance) } return ins } // CDCInstance represent the CDC instance. type CDCInstance struct { BaseInstance topo Topology } // ScaleConfig deploy temporary config on scaling func (i *CDCInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, user string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, user, paths) } // InitConfig implements Instance interface. 
func (i *CDCInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*CDCSpec) globalConfig := topo.ServerConfigs.CDC instanceConfig := spec.Config version := i.CalculateVersion(clusterVersion) if !tidbver.TiCDCSupportConfigFile(version) { if len(globalConfig)+len(instanceConfig) > 0 { return errors.New("server_config is only supported with TiCDC version v4.0.13 or later") } } if !tidbver.TiCDCSupportClusterID(version) && spec.TiCDCClusterID != "" { return errors.New("ticdc_cluster_id is only supported with TiCDC version v6.2.0 or later") } pds := []string{} for _, pdspec := range topo.PDServers { pds = append(pds, pdspec.GetAdvertiseClientURL(enableTLS)) } cfg := &scripts.CDCScript{ Addr: utils.JoinHostPort(i.GetListenHost(), spec.Port), AdvertiseAddr: utils.JoinHostPort(i.GetHost(), i.GetPort()), PD: strings.Join(pds, ","), GCTTL: spec.GCTTL, TZ: spec.TZ, ClusterID: spec.TiCDCClusterID, DataDirEnabled: tidbver.TiCDCSupportDataDir(version), ConfigFileEnabled: tidbver.TiCDCSupportConfigFile(version), TLSEnabled: enableTLS, DeployDir: paths.Deploy, LogDir: paths.Log, DataDir: utils.Ternary(tidbver.TiCDCSupportSortOrDataDir(version), spec.DataDir, "").(string), NumaNode: spec.NumaNode, } // doesn't work if _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil { return err } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_cdc_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_cdc.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } return i.MergeServerConfig(ctx, e, 
globalConfig, instanceConfig, paths) } // setTLSConfig set TLS Config to support enable/disable TLS func (i *CDCInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } var _ RollingUpdateInstance = &CDCInstance{} // GetAddr return the address of this TiCDC instance func (i *CDCInstance) GetAddr() string { return utils.JoinHostPort(i.GetHost(), i.GetPort()) } // PreRestart implements RollingUpdateInstance interface. // All errors are ignored, to trigger hard restart. func (i *CDCInstance) PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, updcfg *UpdateConfig) error { tidbTopo, ok := topo.(*Specification) if !ok { panic("should be type of tidb topology") } logger, ok := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) if !ok { panic("logger not found") } address := i.GetAddr() // cdc rolling upgrade strategy only works if there are more than 2 captures if len(tidbTopo.CDCServers) <= 1 { logger.Debugf("cdc pre-restart skipped, only one capture in the topology, addr: %s", address) return nil } start := time.Now() client := api.NewCDCOpenAPIClient(ctx, topo.(*Specification).GetCDCListWithManageHost(), 5*time.Second, tlsCfg) if err := client.Healthy(); err != nil { logger.Debugf("cdc pre-restart skipped, the cluster unhealthy, trigger hard restart, "+ "addr: %s, err: %+v, elapsed: %+v", address, err, time.Since(start)) return nil } captures, err := client.GetAllCaptures() if err != nil { logger.Debugf("cdc pre-restart skipped, cannot get all captures, trigger hard restart, "+ "addr: %s, err: %+v, elapsed: %+v", address, err, time.Since(start)) return nil } var ( captureID string found bool isOwner bool ) for _, capture := range captures { if address == capture.AdvertiseAddr { found = true captureID = capture.ID isOwner = capture.IsOwner break } } // this may happen if the capture crashed right away. 
if !found { logger.Debugf("cdc pre-restart finished, cannot found the capture, trigger hard restart, "+ "addr: %s, elapsed: %+v", address, time.Since(start)) return nil } if isOwner { if err := client.ResignOwner(address); err != nil { // if resign the owner failed, no more need to drain the current capture, // since it's not allowed by the cdc. // return nil to trigger hard restart. logger.Debugf("cdc pre-restart finished, resign owner failed, trigger hard restart, "+ "addr: %s, captureID: %s, err: %+v, elapsed: %+v", address, captureID, err, time.Since(start)) return nil } } if err := client.DrainCapture(address, captureID, apiTimeoutSeconds); err != nil { logger.Debugf("cdc pre-restart finished, drain the capture failed, "+ "addr: %s, captureID: %s, err: %+v, elapsed: %+v", address, captureID, err, time.Since(start)) // if we drain any one capture failed, no need to drain other captures, just trigger hard restart return nil } logger.Debugf("cdc pre-restart success, addr: %s, captureID: %s, elapsed: %+v", address, captureID, time.Since(start)) return nil } // PostRestart implements RollingUpdateInstance interface. func (i *CDCInstance) PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, updcfg *UpdateConfig) error { logger, ok := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger) if !ok { panic("logger not found") } start := time.Now() address := i.GetAddr() client := api.NewCDCOpenAPIClient(ctx, []string{utils.JoinHostPort(i.GetManageHost(), i.GetPort())}, 5*time.Second, tlsCfg) err := client.IsCaptureAlive() if err != nil { logger.Debugf("cdc post-restart finished, get capture status failed, addr: %s, err: %+v, elapsed: %+v", address, err, time.Since(start)) return nil } logger.Debugf("cdc post-restart success, addr: %s, elapsed: %+v", address, time.Since(start)) return nil } tiup-1.16.3/pkg/cluster/spec/dashboard.go000066400000000000000000000165461505422223000202440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "fmt" "path/filepath" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" ) // DashboardSpec represents the Dashboard topology specification in topology.yaml type DashboardSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` Port int `yaml:"port" default:"12333"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` } // Status queries current status of the instance func (s *DashboardSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string { if timeout < time.Second { timeout = statusQueryTimeout } state := statusByHost(s.GetManageHost(), s.Port, "/status", timeout, tlsCfg) return state } // Role returns the component role 
of the instance func (s *DashboardSpec) Role() string { return ComponentDashboard } // SSH returns the host and SSH port of the instance func (s *DashboardSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *DashboardSpec) GetMainPort() int { return s.Port } // GetManageHost returns the manage host of the instance func (s *DashboardSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *DashboardSpec) IsImported() bool { // TiDB-Ansible do not support dashboard return false } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *DashboardSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // DashboardComponent represents Drainer component. type DashboardComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DashboardComponent) Name() string { return ComponentDashboard } // Role implements Component interface. func (c *DashboardComponent) Role() string { return ComponentDashboard } // Source implements Component interface. func (c *DashboardComponent) Source() string { source := c.Topology.ComponentSources.Dashboard if source != "" { return source } return ComponentDashboard } // CalculateVersion implements the Component interface func (c *DashboardComponent) CalculateVersion(clusterVersion string) string { version := c.Topology.ComponentVersions.Dashboard if version == "" { version = clusterVersion } return version } // SetVersion implements Component interface. func (c *DashboardComponent) SetVersion(version string) { c.Topology.ComponentVersions.Dashboard = version } // Instances implements Component interface. 
func (c *DashboardComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.Drainers)) for _, s := range c.Topology.DashboardServers { ins = append(ins, &DashboardInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.Port, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: s.Status, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, c.Topology}) } return ins } // DashboardInstance represent the Ddashboard instance. type DashboardInstance struct { BaseInstance topo Topology } // ScaleConfig deploy temporary config on scaling func (i *DashboardInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, user string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, user, paths) } // InitConfig implements Instance interface. 
func (i *DashboardInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*DashboardSpec) pds := []string{} for _, pdspec := range topo.PDServers { pds = append(pds, pdspec.GetAdvertiseClientURL(enableTLS)) } cfg := &scripts.DashboardScript{ // -h, --host string listen host of the Dashboard Server Host: i.GetListenHost(), TidbVersion: clusterVersion, DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, Port: spec.Port, NumaNode: spec.NumaNode, PD: strings.Join(pds, ","), TLSEnabled: enableTLS, } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tidb-dashboard_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_tidb-dashboard.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } _, _, err := e.Execute(ctx, "chmod +x "+dst, false) if err != nil { return err } globalConfig := topo.ServerConfigs.Dashboard if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil { return err } return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), i.CalculateVersion(clusterVersion), i.OS(), i.Arch(), i.ComponentName()+".toml", paths) } // setTLSConfig set TLS Config to support enable/disable TLS func (i *DashboardInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } tiup-1.16.3/pkg/cluster/spec/drainer.go000066400000000000000000000220271505422223000177300ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "fmt" "os" "path/filepath" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" ) // DrainerSpec represents the Drainer topology specification in topology.yaml type DrainerSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` Port int `yaml:"port" default:"8249"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` CommitTS *int64 `yaml:"commit_ts,omitempty" validate:"commit_ts:editable"` // do not use it anymore, exist for compatibility Offline bool `yaml:"offline,omitempty"` Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` } // Status queries current status of the instance func (s *DrainerSpec) 
Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string { if timeout < time.Second { timeout = statusQueryTimeout } state := statusByHost(s.GetManageHost(), s.Port, "/status", timeout, tlsCfg) if s.Offline { binlogClient, err := api.NewBinlogClient(pdList, timeout, tlsCfg) if err != nil { return state } id := utils.JoinHostPort(s.Host, s.Port) tombstone, _ := binlogClient.IsDrainerTombstone(ctx, id) if tombstone { state = "Tombstone" } else { state = "Pending Offline" } } return state } // Role returns the component role of the instance func (s *DrainerSpec) Role() string { return ComponentDrainer } // SSH returns the host and SSH port of the instance func (s *DrainerSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *DrainerSpec) GetMainPort() int { return s.Port } // GetManageHost returns the manage host of the instance func (s *DrainerSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *DrainerSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *DrainerSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // DrainerComponent represents Drainer component. type DrainerComponent struct{ Topology *Specification } // Name implements Component interface. func (c *DrainerComponent) Name() string { return ComponentDrainer } // Role implements Component interface. func (c *DrainerComponent) Role() string { return ComponentDrainer } // Source implements Component interface. 
func (c *DrainerComponent) Source() string { source := c.Topology.ComponentSources.Drainer if source != "" { return source } return ComponentDrainer } // CalculateVersion implements the Component interface func (c *DrainerComponent) CalculateVersion(clusterVersion string) string { version := c.Topology.ComponentVersions.Drainer if version == "" { version = clusterVersion } return version } // SetVersion implements Component interface. func (c *DrainerComponent) SetVersion(version string) { c.Topology.ComponentVersions.Drainer = version } // Instances implements Component interface. func (c *DrainerComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.Drainers)) for _, s := range c.Topology.Drainers { ins = append(ins, &DrainerInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.Port, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: s.Status, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, c.Topology}) } return ins } // DrainerInstance represent the Drainer instance. type DrainerInstance struct { BaseInstance topo Topology } // ScaleConfig deploy temporary config on scaling func (i *DrainerInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, user string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, user, paths) } // InitConfig implements Instance interface. 
func (i *DrainerInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*DrainerSpec) nodeID := utils.JoinHostPort(i.GetHost(), i.GetPort()) // keep origin node id if is imported if i.IsImported() { nodeID = "" } pds := []string{} for _, pdspec := range topo.PDServers { pds = append(pds, pdspec.GetAdvertiseClientURL(enableTLS)) } cfg := &scripts.DrainerScript{ NodeID: nodeID, Addr: utils.JoinHostPort(spec.Host, spec.Port), PD: strings.Join(pds, ","), DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, NumaNode: spec.NumaNode, } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_drainer_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_drainer.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } _, _, err := e.Execute(ctx, "chmod +x "+dst, false) if err != nil { return err } globalConfig := topo.ServerConfigs.Drainer // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( clusterName, AnsibleImportedConfigPath, fmt.Sprintf( "%s-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort(), ), ) importConfig, err := os.ReadFile(configPath) if err != nil { return err } globalConfig, err = mergeImported(importConfig, globalConfig) if err != nil { return err } } // set TLS configs spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths) if err != nil { return err } if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil { return err } return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), clusterVersion, i.OS(), i.Arch(), i.ComponentName()+".toml", paths) } // 
setTLSConfig set TLS Config to support enable/disable TLS func (i *DrainerInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { if enableTLS { if configs == nil { configs = make(map[string]any) } configs["security.ssl-ca"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, TLSCACert, ) configs["security.ssl-cert"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["security.ssl-key"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // drainer tls config list tlsConfigs := []string{ "security.ssl-ca", "security.ssl-cert", "security.ssl-key", } // delete TLS configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } tiup-1.16.3/pkg/cluster/spec/grafana.go000066400000000000000000000365321505422223000177110ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec import ( "bytes" "context" "crypto/tls" "fmt" "os" "path" "path/filepath" "reflect" "strings" "time" "text/template" "github.com/pingcap/errors" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/config" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" "gopkg.in/ini.v1" ) // GrafanaSpec represents the Grafana topology specification in topology.yaml type GrafanaSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` Port int `yaml:"port" default:"3000"` DeployDir string `yaml:"deploy_dir,omitempty"` Config map[string]string `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` DashboardDir string `yaml:"dashboard_dir,omitempty" validate:"dashboard_dir:editable"` Username string `yaml:"username,omitempty" default:"admin" validate:"username:editable"` Password string `yaml:"password,omitempty" default:"admin" validate:"password:editable"` AnonymousEnable bool `yaml:"anonymous_enable" default:"false" validate:"anonymous_enable:editable"` RootURL string `yaml:"root_url" validate:"root_url:editable"` Domain string `yaml:"domain" validate:"domain:editable"` DefaultTheme string `yaml:"default_theme,omitempty" validate:"default_theme:editable"` OrgName string `yaml:"org_name,omitempty" validate:"org_name:editable"` OrgRole string `yaml:"org_role,omitempty" validate:"org_role:editable"` UseVMAsDatasource bool `yaml:"use_vm_as_datasource,omitempty" 
validate:"use_vm_as_datasource:editable"` } // Role returns the component role of the instance func (s *GrafanaSpec) Role() string { return ComponentGrafana } // SSH returns the host and SSH port of the instance func (s *GrafanaSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *GrafanaSpec) GetMainPort() int { return s.Port } // GetManageHost returns the manage host of the instance func (s *GrafanaSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *GrafanaSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *GrafanaSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // GrafanaComponent represents Grafana component. type GrafanaComponent struct{ Topology } // Name implements Component interface. func (c *GrafanaComponent) Name() string { return ComponentGrafana } // Role implements Component interface. func (c *GrafanaComponent) Role() string { return RoleMonitor } // Source implements Component interface. func (c *GrafanaComponent) Source() string { return ComponentGrafana } // CalculateVersion implements the Component interface func (c *GrafanaComponent) CalculateVersion(clusterVersion string) string { // always not follow cluster version, use ""(latest) by default version := c.Topology.BaseTopo().GrafanaVersion if version != nil && *version != "" { return *version } return clusterVersion } // SetVersion implements Component interface. func (c *GrafanaComponent) SetVersion(version string) { *c.Topology.BaseTopo().GrafanaVersion = version } // Instances implements Component interface. 
func (c *GrafanaComponent) Instances() []Instance { servers := c.BaseTopo().Grafanas ins := make([]Instance, 0, len(servers)) for _, s := range servers { ins = append(ins, &GrafanaInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, NumaNode: "", NumaCores: "", Ports: []int{ s.Port, }, Dirs: []string{ s.DeployDir, }, StatusFn: func(_ context.Context, timeout time.Duration, _ *tls.Config, _ ...string) string { return statusByHost(s.GetManageHost(), s.Port, "/login", timeout, nil) }, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, topo: c.Topology, }) } return ins } // GrafanaInstance represent the grafana instance type GrafanaInstance struct { BaseInstance topo Topology } // InitConfig implement Instance interface func (i *GrafanaInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { gOpts := *i.topo.BaseTopo().GlobalOptions if err := i.BaseInstance.InitConfig(ctx, e, gOpts, deployUser, paths); err != nil { return err } // transfer run script cfg := &scripts.GrafanaScript{DeployDir: paths.Deploy} fp := filepath.Join(paths.Cache, fmt.Sprintf("run_grafana_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_grafana.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } // transfer config spec := i.InstanceSpec.(*GrafanaSpec) fp = filepath.Join(paths.Cache, fmt.Sprintf("grafana_%s_%d.ini", i.GetHost(), i.GetPort())) if err := config.NewGrafanaConfig(i.GetHost(), paths.Deploy). WithPort(uint64(i.GetPort())). 
WithUsername(spec.Username). WithPassword(spec.Password). WithAnonymousenable(spec.AnonymousEnable). WithRootURL(spec.RootURL). WithDomain(spec.Domain). WithDefaultTheme(spec.DefaultTheme). WithOrgName(spec.OrgName). WithOrgRole(spec.OrgRole). ConfigToFile(fp); err != nil { return err } // doesn't work if _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil { return err } userConfig := i.topo.GetGrafanaConfig() if userConfig == nil { userConfig = make(map[string]string) } for k, v := range spec.Config { userConfig[k] = v } err := mergeAdditionalGrafanaConf(fp, userConfig) if err != nil { return err } dst = filepath.Join(paths.Deploy, "conf", "grafana.ini") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if err := i.installDashboards(ctx, e, paths.Deploy, clusterName, clusterVersion); err != nil { return errors.Annotate(err, "install dashboards") } // initial dashboards/*.json if err := i.initDashboards(ctx, e, i.InstanceSpec.(*GrafanaSpec), paths, clusterName); err != nil { return errors.Annotate(err, "initial dashboards") } // transfer dashboard.yml fp = filepath.Join(paths.Cache, fmt.Sprintf("dashboard_%s.yml", i.GetHost())) if err := config.NewDashboardConfig(clusterName, paths.Deploy).ConfigToFile(fp); err != nil { return err } dst = filepath.Join(paths.Deploy, "provisioning", "dashboards", "dashboard.yml") if err := i.TransferLocalConfigFile(ctx, e, fp, dst); err != nil { return err } // Get the grafana spec spec = i.InstanceSpec.(*GrafanaSpec) // Get monitors topo := reflect.ValueOf(i.topo) if topo.Kind() == reflect.Ptr { topo = topo.Elem() } val := topo.FieldByName("Monitors") if (val == reflect.Value{}) { return errors.Errorf("field Monitors not found in topology: %v", topo) } monitors := val.Interface().([]*PrometheusSpec) // transfer datasource.yml if len(monitors) == 0 { return errors.New("no prometheus found in topology") } // Create datasources configuration datasources := make([]*config.DatasourceConfig, 0) // 
Determine which datasource is default based on Grafana spec vmIsDefault := spec.UseVMAsDatasource && monitors[0].PromRemoteWriteToVM promIsDefault := !vmIsDefault // Add Prometheus datasource promDatasource := config.NewDatasourceConfig( clusterName, // not support tls fmt.Sprintf("http://%s", utils.JoinHostPort(monitors[0].Host, monitors[0].Port)), ).WithIsDefault(promIsDefault) datasources = append(datasources, promDatasource) // Add VM datasource if enabled if monitors[0].PromRemoteWriteToVM && monitors[0].NgPort > 0 { vmDatasource := config.NewDatasourceConfig( fmt.Sprintf("%s-vm", clusterName), // not support tls fmt.Sprintf("http://%s", utils.JoinHostPort(monitors[0].Host, monitors[0].NgPort)), ).WithIsDefault(vmIsDefault) datasources = append(datasources, vmDatasource) } // Write datasources configuration fp = filepath.Join(paths.Cache, fmt.Sprintf("datasource_%s.yml", i.GetHost())) content := bytes.NewBuffer(nil) // Create a map to hold all datasources datasourceMap := map[string]any{ "Datasources": datasources, } // Create a template for the datasource configuration tpl, err := embed.ReadTemplate(path.Join("templates", "config", "datasource.yml.tpl")) if err != nil { return err } tmpl, err := template.New("Datasource").Parse(string(tpl)) if err != nil { return err } if err := tmpl.Execute(content, datasourceMap); err != nil { return err } if err := utils.WriteFile(fp, content.Bytes(), 0644); err != nil { return err } dst = filepath.Join(paths.Deploy, "provisioning", "datasources", "datasource.yml") return i.TransferLocalConfigFile(ctx, e, fp, dst) } // setTLSConfig set TLS Config to support enable/disable TLS func (i *GrafanaInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } func (i *GrafanaInstance) initDashboards(ctx context.Context, e ctxt.Executor, spec *GrafanaSpec, paths meta.DirPaths, clusterName string) error { dashboardsDir := 
filepath.Join(paths.Deploy, "dashboards") if spec.DashboardDir != "" { return i.TransferLocalConfigDir(ctx, e, spec.DashboardDir, dashboardsDir, func(name string) bool { return strings.HasSuffix(name, ".json") }) } cmds := []string{ "mkdir -p %[1]s", `find %[1]s -maxdepth 1 -type f -name "*.json" -delete`, `find %[2]s/bin -maxdepth 1 -type f -name "*.json" -exec cp {} %[1]s \;`, } _, stderr, err := e.Execute(ctx, fmt.Sprintf(strings.Join(cmds, " && "), dashboardsDir, paths.Deploy), false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } // Get the monitor component to check if VM is the default datasource topo := reflect.ValueOf(i.topo) if topo.Kind() == reflect.Ptr { topo = topo.Elem() } val := topo.FieldByName("Monitors") if (val == reflect.Value{}) { return errors.Errorf("field Monitors not found in topology: %v", topo) } // Determine which datasource to use in dashboards datasourceName := clusterName monitors := val.Interface().([]*PrometheusSpec) if len(monitors) > 0 && monitors[0].PromRemoteWriteToVM && monitors[0].NgPort > 0 && (spec.UseVMAsDatasource) { datasourceName = fmt.Sprintf("%s-vm", clusterName) } // Deal with the cluster name and datasource for _, cmd := range []string{ `find %s -type f -exec sed -i 's/\${DS_.*-CLUSTER}/%s/g' {} \;`, `find %s -type f -exec sed -i 's/DS_.*-CLUSTER/%s/g' {} \;`, `find %s -type f -exec sed -i 's/\${DS_LIGHTNING}/%s/g' {} \;`, `find %s -type f -exec sed -i 's/DS_LIGHTNING/%s/g' {} \;`, `find %s -type f -exec sed -i 's/test-cluster/%s/g' {} \;`, `find %s -type f -exec sed -i 's/Test-Cluster/%s/g' {} \;`, } { cmd := fmt.Sprintf(cmd, dashboardsDir, datasourceName) _, stderr, err := e.Execute(ctx, cmd, false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } } return nil } // We only really installDashboards for dm cluster because the dashboards(*.json) packed with // the grafana component is designed for tidb cluster (the dm cluster use the same cluster // component 
with tidb cluster), and the dashboards for dm cluster is packed in the dm-master // component. So if deploying tidb cluster, the dashboards is correct, if deploying dm cluster, // we should remove dashboards for tidb and install dashboards for dm. func (i *GrafanaInstance) installDashboards(ctx context.Context, e ctxt.Executor, deployDir, clusterName, clusterVersion string) error { if i.topo.Type() != TopoTypeDM { return nil } tmp := filepath.Join(deployDir, "_tiup_tmp") _, stderr, err := e.Execute(ctx, fmt.Sprintf("mkdir -p %s", tmp), false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } srcPath := PackagePath(GetDMMasterPackageName(i.topo), clusterVersion, i.OS(), i.Arch()) dstPath := filepath.Join(tmp, filepath.Base(srcPath)) err = e.Transfer(ctx, srcPath, dstPath, false, 0, false) if err != nil { return err } cmd := fmt.Sprintf(`tar --no-same-owner -zxf %s -C %s && rm %s`, dstPath, tmp, dstPath) _, stderr, err = e.Execute(ctx, cmd, false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } // copy dm-master/scripts/*.json targetDir := filepath.Join(deployDir, "bin") cmds := []string{ "mkdir -p %[1]s", `find %[1]s -maxdepth 1 -type f -name "*.json" -delete`, `find %[2]s/dm-master/scripts -type f -name "*.json" -exec cp {} %[1]s \;`, "rm -rf %[2]s", } _, stderr, err = e.Execute(ctx, fmt.Sprintf(strings.Join(cmds, " && "), targetDir, tmp), false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } return nil } // ScaleConfig deploy temporary config on scaling func (i *GrafanaInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName string, clusterVersion string, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = topo.Merge(i.topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } func mergeAdditionalGrafanaConf(source string, addition map[string]string) error { bytes, err := 
os.ReadFile(source) if err != nil { return err } result, err := ini.Load(bytes) if err != nil { return err } for k, v := range addition { // convert "log.file.level to [log.file] level" for i := len(k) - 1; i >= 0; i-- { if k[i] == '.' { result.Section(k[:i]).Key(k[i+1:]).SetValue(v) break } } } return result.SaveTo(source) } tiup-1.16.3/pkg/cluster/spec/grafana_test.go000066400000000000000000000301341505422223000207400ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "fmt" "os" "os/user" "path" "path/filepath" "strings" "testing" "time" "github.com/google/uuid" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestLocalDashboards(t *testing.T) { ctx := ctxt.New(context.Background(), 0, logprinter.NewLogger("")) deployDir, err := os.MkdirTemp("", "tiup-*") assert.Nil(t, err) defer os.RemoveAll(deployDir) localDir, err := filepath.Abs("./testdata/dashboards") assert.Nil(t, err) topo := new(Specification) topo.Grafanas = append(topo.Grafanas, &GrafanaSpec{ Host: "127.0.0.1", Port: 3000, DashboardDir: localDir, }) comp := GrafanaComponent{topo} ints := comp.Instances() assert.Equal(t, len(ints), 1) grafanaInstance := ints[0].(*GrafanaInstance) user, err := user.Current() assert.Nil(t, err) e, err := executor.New(executor.SSHTypeNone, false, 
executor.SSHConfig{Host: "127.0.0.1", User: user.Username}) assert.Nil(t, err) clusterName := "tiup-test-cluster-" + uuid.New().String() err = grafanaInstance.initDashboards(ctx, e, topo.Grafanas[0], meta.DirPaths{Deploy: deployDir}, clusterName) assert.Nil(t, err) assert.FileExists(t, path.Join(deployDir, "dashboards", "tidb.json")) fs, err := os.ReadDir(localDir) assert.Nil(t, err) for _, f := range fs { assert.FileExists(t, path.Join(deployDir, "dashboards", f.Name())) } } func TestMergeAdditionalGrafanaConf(t *testing.T) { file, err := os.CreateTemp("", "tiup-cluster-spec-test") if err != nil { panic(fmt.Sprintf("create temp file: %s", err)) } defer os.Remove(file.Name()) _, err = file.WriteString(`#################################### SMTP / Emailing ########################## [smtp] ;enabled = false ;host = localhost:25 ;user = password = ` + "`1#2`" + ` ;cert_file = ;key_file = ;skip_verify = false ;from_address = admin@grafana.localhost [emails] ;welcome_email_on_sign_up = false #################################### Logging ########################## [log] # Either "console", "file", "syslog". Default is console and file # Use space to separate multiple modes, e.g. "console file" mode = file # Either "trace", "debug", "info", "warn", "error", "critical", default is "info" ;level = info # For "console" mode only [log.console] ;level = # log line format, valid options are text, console and json ;format = console # For "file" mode only [log.file] level = info `) assert.Nil(t, err) expected := `# ################################### SMTP / Emailing ########################## [smtp] ; enabled = false ; host = localhost:25 ; user = password = ` + "`1#2`" + ` enabled = true ; cert_file = ; key_file = ; skip_verify = false ; from_address = admin@grafana.localhost [emails] ; welcome_email_on_sign_up = false # ################################### Logging ########################## [log] # Either "console", "file", "syslog". 
Default is console and file # Use space to separate multiple modes, e.g. "console file" mode = file # Either "trace", "debug", "info", "warn", "error", "critical", default is "info" ; level = info # For "console" mode only [log.console] ; level = # log line format, valid options are text, console and json ; format = console # For "file" mode only [log.file] level = warning ` addition := map[string]string{ "log.file.level": "warning", "smtp.enabled": "true", } err = mergeAdditionalGrafanaConf(file.Name(), addition) assert.Nil(t, err) result, err := os.ReadFile(file.Name()) assert.Nil(t, err) assert.Equal(t, expected, string(result)) } type mockExecutor struct { executeFunc func(ctx context.Context, cmd string, sudo bool, timeouts ...time.Duration) ([]byte, []byte, error) } func (e *mockExecutor) Execute(ctx context.Context, cmd string, sudo bool, timeouts ...time.Duration) (stdout []byte, stderr []byte, err error) { if e.executeFunc != nil { return e.executeFunc(ctx, cmd, sudo, timeouts...) 
} return nil, nil, nil } func (e *mockExecutor) Transfer(ctx context.Context, src, dst string, download bool, limit int, compress bool) error { // Copy the file for testing if !download { err := os.MkdirAll(filepath.Dir(dst), 0755) if err != nil { return err } content, err := os.ReadFile(src) if err != nil { return err } return os.WriteFile(dst, content, 0644) } return nil } func TestGrafanaDatasourceConfig(t *testing.T) { ctx := context.Background() deployDir := t.TempDir() cacheDir := t.TempDir() // Create paths structure paths := meta.DirPaths{ Deploy: deployDir, Cache: cacheDir, } // Create mock executor mockExec := &mockExecutor{} // Create test topology with both Prometheus and VM topo := new(Specification) topo.Monitors = []*PrometheusSpec{ { Host: "127.0.0.1", Port: 9090, NgPort: 12020, PromRemoteWriteToVM: true, }, } topo.Grafanas = []*GrafanaSpec{ { Host: "127.0.0.1", Port: 3000, }, } // Create Grafana component comp := GrafanaComponent{topo} grafanaInstance := comp.Instances()[0].(*GrafanaInstance) // Test datasource configuration clusterName := "test-cluster" err := grafanaInstance.InitConfig(ctxt.New(ctx, 0, logprinter.NewLogger("")), mockExec, clusterName, "v5.4.0", "tidb", paths) require.NoError(t, err) // Verify the datasource configuration file dsContent, err := os.ReadFile(filepath.Join(deployDir, "provisioning", "datasources", "datasource.yml")) require.NoError(t, err) // Check if the content contains both Prometheus and VM datasources assert.Contains(t, string(dsContent), fmt.Sprintf("name: %s", clusterName)) assert.Contains(t, string(dsContent), fmt.Sprintf("name: %s-vm", clusterName)) assert.Contains(t, string(dsContent), "type: prometheus") assert.Contains(t, string(dsContent), "url: http://127.0.0.1:9090") // Verify Prometheus is the default datasource assert.Contains(t, string(dsContent), fmt.Sprintf(`name: %s`, clusterName)) assert.Contains(t, string(dsContent), `isDefault: true`) assert.Contains(t, string(dsContent), `url: 
http://127.0.0.1:9090`) assert.Contains(t, string(dsContent), fmt.Sprintf(`name: %s-vm`, clusterName)) assert.Contains(t, string(dsContent), `url: http://127.0.0.1:12020`) // Test without VM remote write enabled topo.Monitors[0].PromRemoteWriteToVM = false err = grafanaInstance.InitConfig(ctxt.New(ctx, 0, logprinter.NewLogger("")), mockExec, clusterName, "v5.4.0", "tidb", paths) require.NoError(t, err) // Verify the datasource configuration file again dsContent, err = os.ReadFile(filepath.Join(deployDir, "provisioning", "datasources", "datasource.yml")) require.NoError(t, err) // Check if the content contains only Prometheus datasource assert.Contains(t, string(dsContent), fmt.Sprintf("name: %s", clusterName)) assert.NotContains(t, string(dsContent), fmt.Sprintf("name: %s-vm", clusterName)) assert.Contains(t, string(dsContent), "type: prometheus") assert.Contains(t, string(dsContent), "url: http://127.0.0.1:9090") } // TestVictoriaMetricsDefaultDatasource tests that when Victoria Metrics is set as the default datasource, // the dashboards correctly use it instead of Prometheus func TestVictoriaMetricsDefaultDatasource(t *testing.T) { ctx := context.Background() deployDir := t.TempDir() cacheDir := t.TempDir() // Create paths structure with folders needed for dashboards paths := meta.DirPaths{ Deploy: deployDir, Cache: cacheDir, } // Create the necessary directory structure dashboardsDir := filepath.Join(deployDir, "dashboards") binDir := filepath.Join(deployDir, "bin") err := os.MkdirAll(dashboardsDir, 0755) require.NoError(t, err) err = os.MkdirAll(binDir, 0755) require.NoError(t, err) // Create a mock for the execute function to handle the dashboard copy command origExecutor := &mockExecutor{ executeFunc: func(ctx context.Context, cmd string, sudo bool, timeouts ...time.Duration) ([]byte, []byte, error) { // Manually perform what the command would do if strings.Contains(cmd, "find") && strings.Contains(cmd, "cp") { // Create the dashboard file by copying it from 
bin to dashboards dir content, err := os.ReadFile(filepath.Join(binDir, "sample.json")) if err != nil { return nil, nil, err } err = os.WriteFile(filepath.Join(dashboardsDir, "sample.json"), content, 0644) if err != nil { return nil, nil, err } } else if strings.Contains(cmd, "sed") { // Handle the sed command to replace datasource references files, err := os.ReadDir(dashboardsDir) if err != nil { return nil, nil, err } for _, file := range files { if strings.HasSuffix(file.Name(), ".json") { content, err := os.ReadFile(filepath.Join(dashboardsDir, file.Name())) if err != nil { return nil, nil, err } // Replace datasource references - simulating what sed would do modifiedContent := strings.ReplaceAll(string(content), `"DS_TEST-CLUSTER"`, fmt.Sprintf(`"DS_%s-VM"`, strings.ToUpper("test-cluster"))) modifiedContent = strings.ReplaceAll(modifiedContent, `"text": "test-cluster"`, fmt.Sprintf(`"text": "%s-vm"`, "test-cluster")) modifiedContent = strings.ReplaceAll(modifiedContent, `"value": "test-cluster"`, fmt.Sprintf(`"value": "%s-vm"`, "test-cluster")) err = os.WriteFile(filepath.Join(dashboardsDir, file.Name()), []byte(modifiedContent), 0644) if err != nil { return nil, nil, err } } } } return nil, nil, nil }, } // Create a sample dashboard file with datasource references dashboardContent := `{ "annotations": { "list": [] }, "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, "links": [], "liveNow": false, "panels": [], "refresh": "", "schemaVersion": 38, "style": "dark", "tags": [], "templating": { "list": [ { "current": { "selected": false, "text": "test-cluster", "value": "test-cluster" }, "hide": 0, "includeAll": false, "label": "Datasource", "multi": false, "name": "DS_TEST-CLUSTER", "options": [], "query": "prometheus", "refresh": 1, "regex": "", "skipUrlSync": false, "type": "datasource" } ] }, "title": "Test Dashboard", "uid": "test", "version": 1, "weekStart": "" }` err = os.WriteFile(filepath.Join(binDir, "sample.json"), 
[]byte(dashboardContent), 0644) require.NoError(t, err) // Create test topology with VM as default datasource topo := new(Specification) topo.Monitors = []*PrometheusSpec{ { Host: "127.0.0.1", Port: 9090, NgPort: 12020, PromRemoteWriteToVM: true, }, } topo.Grafanas = []*GrafanaSpec{ { Host: "127.0.0.1", Port: 3000, UseVMAsDatasource: true, }, } // Create Grafana component with VM as default datasource comp := GrafanaComponent{topo} grafanaInstance := comp.Instances()[0].(*GrafanaInstance) // Run InitConfig which will process dashboards err = grafanaInstance.InitConfig(ctxt.New(ctx, 0, logprinter.NewLogger("")), origExecutor, "test-cluster", "v5.4.0", "tidb", paths) require.NoError(t, err) // Check if the dashboard file was created and datasource references were updated dashboardFile := filepath.Join(dashboardsDir, "sample.json") content, err := os.ReadFile(dashboardFile) require.NoError(t, err) // Verify VM datasource was used assert.Contains(t, string(content), `"DS_TEST-CLUSTER-VM"`) assert.Contains(t, string(content), `"text": "test-cluster-vm"`) assert.Contains(t, string(content), `"value": "test-cluster-vm"`) } tiup-1.16.3/pkg/cluster/spec/instance.go000066400000000000000000000416751505422223000201220ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec import ( "context" "crypto/tls" "fmt" "os" "path" "path/filepath" "reflect" "strings" "time" "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/module" system "github.com/pingcap/tiup/pkg/cluster/template/systemd" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" ) // Components names const ( ComponentTiDB = "tidb" ComponentTiKV = "tikv" ComponentTiKVWorker = "tikv-worker" ComponentPD = "pd" ComponentTSO = "tso" ComponentScheduling = "scheduling" ComponentTiFlash = "tiflash" ComponentTiProxy = "tiproxy" ComponentGrafana = "grafana" ComponentDrainer = "drainer" ComponentDashboard = "tidb-dashboard" ComponentPump = "pump" ComponentCDC = "cdc" ComponentTiKVCDC = "tikv-cdc" ComponentTiSpark = "tispark" ComponentSpark = "spark" ComponentAlertmanager = "alertmanager" ComponentDMMaster = "dm-master" ComponentDMWorker = "dm-worker" ComponentPrometheus = "prometheus" ComponentBlackboxExporter = "blackbox_exporter" ComponentNodeExporter = "node_exporter" ComponentCheckCollector = "insight" ) var ( // CopyConfigFile is the checkpoint to cache config file transfer action CopyConfigFile = checkpoint.Register( checkpoint.Field("config-file", reflect.DeepEqual), ) ) // Component represents a component of the cluster. type Component interface { Name() string Role() string Source() string Instances() []Instance CalculateVersion(string) string SetVersion(string) } // UpdateConfig is used to control behavior pre/post hook of instances. type UpdateConfig struct { CurrentVersion string TargetVersion string } // RollingUpdateInstance represent a instance need to transfer state when restart. // e.g transfer leader. 
type RollingUpdateInstance interface { PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, extra *UpdateConfig) error PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, extra *UpdateConfig) error } // Instance represents the instance. type Instance interface { InstanceSpec ID() string Ready(context.Context, ctxt.Executor, uint64, *tls.Config) error InitConfig(ctx context.Context, e ctxt.Executor, clusterName string, clusterVersion string, deployUser string, paths meta.DirPaths) error ScaleConfig(ctx context.Context, e ctxt.Executor, topo Topology, clusterName string, clusterVersion string, deployUser string, paths meta.DirPaths) error PrepareStart(ctx context.Context, tlsCfg *tls.Config) error ComponentName() string ComponentSource() string InstanceName() string ServiceName() string ResourceControl() meta.ResourceControl GetHost() string GetManageHost() string GetPort() int GetSSHPort() int GetNumaNode() string GetNumaCores() string DeployDir() string UsedPorts() []int UsedDirs() []string Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string Uptime(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration DataDir() string LogDir() string OS() string // only linux supported now Arch() string IsPatched() bool SetPatched(bool) CalculateVersion(string) string // SetVersion(string) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) } // PortStarted wait until a port is being listened func PortStarted(ctx context.Context, e ctxt.Executor, port int, timeout uint64) error { c := module.WaitForConfig{ Port: port, State: "started", Timeout: time.Second * time.Duration(timeout), } w := module.NewWaitFor(c) return w.Execute(ctx, e) } // PortStopped wait until a port is being released func PortStopped(ctx context.Context, e ctxt.Executor, port int, timeout uint64) error { c := 
module.WaitForConfig{ Port: port, State: "stopped", Timeout: time.Second * time.Duration(timeout), } w := module.NewWaitFor(c) return w.Execute(ctx, e) } // BaseInstance implements some method of Instance interface.. type BaseInstance struct { InstanceSpec Name string Host string ManageHost string ListenHost string Port int SSHP int Source string NumaNode string NumaCores string Ports []int Dirs []string StatusFn func(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdHosts ...string) string UptimeFn func(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration Component Component } // Ready implements Instance interface func (i *BaseInstance) Ready(ctx context.Context, e ctxt.Executor, timeout uint64, _ *tls.Config) error { return PortStarted(ctx, e, i.Port, timeout) } // InitConfig init the service configuration. func (i *BaseInstance) InitConfig(ctx context.Context, e ctxt.Executor, opt GlobalOptions, user string, paths meta.DirPaths) (err error) { comp := i.ComponentName() host := i.GetHost() port := i.GetPort() sysCfg := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, host, port)) // insert checkpoint point := checkpoint.Acquire(ctx, CopyConfigFile, map[string]any{"config-file": sysCfg}) defer func() { point.Release(err, zap.String("config-file", sysCfg)) }() if point.Hit() != nil { return nil } systemdMode := opt.SystemdMode if len(systemdMode) == 0 { systemdMode = SystemMode } resource := MergeResourceControl(opt.ResourceControl, i.ResourceControl()) systemCfg := system.NewConfig(comp, user, paths.Deploy). WithMemoryLimit(resource.MemoryLimit). WithCPUQuota(resource.CPUQuota). WithLimitCORE(resource.LimitCORE). WithTimeoutStartSec(resource.TimeoutStartSec). WithTimeoutStopSec(resource.TimeoutStopSec). WithIOReadBandwidthMax(resource.IOReadBandwidthMax). WithIOWriteBandwidthMax(resource.IOWriteBandwidthMax). WithSystemdMode(string(systemdMode)) // For not auto start if using binlogctl to offline. 
// bad design if comp == ComponentPump || comp == ComponentDrainer { systemCfg.Restart = "on-failure" } if err := systemCfg.ConfigToFile(sysCfg); err != nil { return errors.Trace(err) } tgt := filepath.Join("/tmp", comp+"_"+uuid.New().String()+".service") if err := e.Transfer(ctx, sysCfg, tgt, false, 0, false); err != nil { return errors.Annotatef(err, "transfer from %s to %s failed", sysCfg, tgt) } systemdDir := "/etc/systemd/system/" sudo := true if opt.SystemdMode == UserMode { systemdDir = "~/.config/systemd/user/" sudo = false } cmd := fmt.Sprintf("mv %s %s%s-%d.service", tgt, systemdDir, comp, port) if _, _, err := e.Execute(ctx, cmd, sudo); err != nil { return errors.Annotatef(err, "execute: %s", cmd) } // restorecon restores SELinux Contexts // Check with: ls -lZ /path/to/file // If the context is wrong systemctl will complain about a missing unit file // Note that we won't check for errors here because: // - We don't support SELinux in Enforcing mode // - restorecon might not be available (Ubuntu doesn't install SELinux tools by default) cmd = fmt.Sprintf("restorecon %s%s-%d.service", systemdDir, comp, port) e.Execute(ctx, cmd, sudo) //nolint // doesn't work if _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil { return err } return nil } // setTLSConfig set TLS Config to support enable/disable TLS // baseInstance no need to configure TLS func (i *BaseInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } // TransferLocalConfigFile scp local config file to remote // Precondition: the user on remote have permission to access & mkdir of dest files func (i *BaseInstance) TransferLocalConfigFile(ctx context.Context, e ctxt.Executor, local, remote string) error { remoteDir := filepath.Dir(remote) // make sure the directory exists cmd := fmt.Sprintf("mkdir -p %s", remoteDir) if _, _, err := e.Execute(ctx, cmd, false); err != nil { return errors.Annotatef(err, 
"execute: %s", cmd) } if err := e.Transfer(ctx, local, remote, false, 0, false); err != nil { return errors.Annotatef(err, "transfer from %s to %s failed", local, remote) } return nil } // TransferLocalConfigDir scp local config directory to remote // Precondition: the user on remote have right to access & mkdir of dest files func (i *BaseInstance) TransferLocalConfigDir(ctx context.Context, e ctxt.Executor, local, remote string, filter func(string) bool) error { return i.IteratorLocalConfigDir(ctx, local, filter, func(fname string) error { localPath := path.Join(local, fname) remotePath := path.Join(remote, fname) if err := i.TransferLocalConfigFile(ctx, e, localPath, remotePath); err != nil { return errors.Annotatef(err, "transfer local config (%s -> %s) failed", localPath, remotePath) } return nil }) } // IteratorLocalConfigDir iterators the local dir with filter, then invoke f for each found fileName func (i *BaseInstance) IteratorLocalConfigDir(ctx context.Context, local string, filter func(string) bool, f func(string) error) error { files, err := os.ReadDir(local) if err != nil { return errors.Annotatef(err, "read local directory %s failed", local) } for _, file := range files { if filter != nil && !filter(file.Name()) { continue } if err := f(file.Name()); err != nil { return err } } return nil } // MergeServerConfig merges the server configuration and overwrite the global configuration func (i *BaseInstance) MergeServerConfig(ctx context.Context, e ctxt.Executor, globalConf, instanceConf map[string]any, paths meta.DirPaths) error { fp := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort())) conf, err := Merge2Toml(i.ComponentName(), globalConf, instanceConf) if err != nil { return err } err = utils.WriteFile(fp, conf, os.ModePerm) if err != nil { return err } dst := filepath.Join(paths.Deploy, "conf", fmt.Sprintf("%s.toml", i.ComponentName())) // transfer config return e.Transfer(ctx, fp, dst, false, 0, false) 
} // mergeTiFlashLearnerServerConfig merges the server configuration and overwrite the global configuration func (i *BaseInstance) mergeTiFlashLearnerServerConfig(ctx context.Context, e ctxt.Executor, globalConf, instanceConf map[string]any, paths meta.DirPaths) error { fp := filepath.Join(paths.Cache, fmt.Sprintf("%s-learner-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort())) conf, err := Merge2Toml(i.ComponentName()+"-learner", globalConf, instanceConf) if err != nil { return err } err = utils.WriteFile(fp, conf, os.ModePerm) if err != nil { return err } dst := filepath.Join(paths.Deploy, "conf", fmt.Sprintf("%s-learner.toml", i.ComponentName())) // transfer config return e.Transfer(ctx, fp, dst, false, 0, false) } // ID returns the identifier of this instance, the ID is constructed by host:port func (i *BaseInstance) ID() string { return utils.JoinHostPort(i.Host, i.Port) } // ComponentName implements Instance interface func (i *BaseInstance) ComponentName() string { return i.Name } // ComponentSource implements Instance interface func (i *BaseInstance) ComponentSource() string { if i.Source != "" { return i.Source } else if i.Component.Source() != "" { return i.Component.Source() } return i.ComponentName() } // InstanceName implements Instance interface func (i *BaseInstance) InstanceName() string { if i.Port > 0 { return fmt.Sprintf("%s%d", i.Name, i.Port) } return i.ComponentName() } // ServiceName implements Instance interface func (i *BaseInstance) ServiceName() string { var name string switch i.ComponentName() { case ComponentSpark, ComponentTiSpark: name = i.Role() default: name = i.Name } if i.Port > 0 { return fmt.Sprintf("%s-%d.service", name, i.Port) } return fmt.Sprintf("%s.service", name) } // GetHost implements Instance interface func (i *BaseInstance) GetHost() string { return i.Host } // GetManageHost implements Instance interface func (i *BaseInstance) GetManageHost() string { if i.ManageHost != "" { return i.ManageHost } return i.Host } 
// GetListenHost implements Instance interface func (i *BaseInstance) GetListenHost() string { if i.ListenHost == "" { // ipv6 address if strings.Contains(i.Host, ":") { return "::" } return "0.0.0.0" } return i.ListenHost } // GetSSHPort implements Instance interface func (i *BaseInstance) GetSSHPort() int { return i.SSHP } // GetNumaNode implements Instance interface func (i *BaseInstance) GetNumaNode() string { return i.NumaNode } // GetNumaCores implements Instance interface func (i *BaseInstance) GetNumaCores() string { return i.NumaCores } // DeployDir implements Instance interface func (i *BaseInstance) DeployDir() string { return reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("DeployDir").String() } // TLSDir implements Instance interface func (i *BaseInstance) TLSDir() string { return i.DeployDir() } // DataDir implements Instance interface func (i *BaseInstance) DataDir() string { dataDir := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("DataDir") if !dataDir.IsValid() { return "" } // the default data_dir is relative to deploy_dir if dataDir.String() != "" && !strings.HasPrefix(dataDir.String(), "/") { return filepath.Join(i.DeployDir(), dataDir.String()) } return dataDir.String() } // LogDir implements Instance interface func (i *BaseInstance) LogDir() string { logDir := "" field := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("LogDir") if field.IsValid() { logDir = field.Interface().(string) } if logDir == "" { logDir = "log" } if !strings.HasPrefix(logDir, "/") { logDir = filepath.Join(i.DeployDir(), logDir) } return logDir } // OS implements Instance interface func (i *BaseInstance) OS() string { v := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("OS") if !v.IsValid() { return "" } return v.Interface().(string) } // Arch implements Instance interface func (i *BaseInstance) Arch() string { v := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("Arch") if !v.IsValid() { return 
"" } return v.Interface().(string) } // IsPatched implements Instance interface func (i *BaseInstance) IsPatched() bool { v := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("Patched") if !v.IsValid() { return false } return v.Bool() } // SetPatched implements the Instance interface func (i *BaseInstance) SetPatched(p bool) { v := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("Patched") if !v.CanSet() { return } v.SetBool(p) } // CalculateVersion implements the Instance interface func (i *BaseInstance) CalculateVersion(globalVersion string) string { return i.Component.CalculateVersion(globalVersion) } // PrepareStart checks instance requirements before starting func (i *BaseInstance) PrepareStart(ctx context.Context, tlsCfg *tls.Config) error { return nil } // MergeResourceControl merge the rhs into lhs and overwrite rhs if lhs has value for same field func MergeResourceControl(lhs, rhs meta.ResourceControl) meta.ResourceControl { if rhs.MemoryLimit != "" { lhs.MemoryLimit = rhs.MemoryLimit } if rhs.CPUQuota != "" { lhs.CPUQuota = rhs.CPUQuota } if rhs.IOReadBandwidthMax != "" { lhs.IOReadBandwidthMax = rhs.IOReadBandwidthMax } if rhs.IOWriteBandwidthMax != "" { lhs.IOWriteBandwidthMax = rhs.IOWriteBandwidthMax } if rhs.LimitCORE != "" { lhs.LimitCORE = rhs.LimitCORE } return lhs } // ResourceControl return cgroups config of instance func (i *BaseInstance) ResourceControl() meta.ResourceControl { if v := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("ResourceControl"); v.IsValid() { return v.Interface().(meta.ResourceControl) } return meta.ResourceControl{} } // GetPort implements Instance interface func (i *BaseInstance) GetPort() int { return i.Port } // UsedPorts implements Instance interface func (i *BaseInstance) UsedPorts() []int { return i.Ports } // UsedDirs implements Instance interface func (i *BaseInstance) UsedDirs() []string { return i.Dirs } // Status implements Instance interface func (i *BaseInstance) 
Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string { return i.StatusFn(ctx, timeout, tlsCfg, pdList...) } // Uptime implements Instance interface func (i *BaseInstance) Uptime(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return i.UptimeFn(ctx, timeout, tlsCfg) } tiup-1.16.3/pkg/cluster/spec/monitoring.go000066400000000000000000000561451505422223000205010ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "fmt" "os" "path" "path/filepath" "reflect" "regexp" "slices" "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/config" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" "gopkg.in/yaml.v3" ) // PrometheusSpec represents the Prometheus Server topology specification in topology.yaml type PrometheusSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` Port int `yaml:"port" default:"9090"` NgPort int `yaml:"ng_port,omitempty" validate:"ng_port:editable"` // ng_port is usable since v5.3.0 and default as 12020 
since v5.4.0, so the default value is set in spec.go/AdjustByVersion DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` PromRemoteWriteToVM bool `yaml:"prom_remote_write_to_vm,omitempty" validate:"prom_remote_write_to_vm:editable"` // Enable remote write to ng-monitoring EnablePromAgentMode bool `yaml:"enable_prom_agent_mode,omitempty" validate:"enable_prom_agent_mode:editable"` // Enable Prometheus agent mode RemoteConfig Remote `yaml:"remote_config,omitempty" validate:"remote_config:ignore"` ExternalAlertmanagers []ExternalAlertmanager `yaml:"external_alertmanagers" validate:"external_alertmanagers:ignore"` PushgatewayAddrs []string `yaml:"pushgateway_addrs,omitempty" validate:"pushgateway_addrs:ignore"` Retention string `yaml:"storage_retention,omitempty" validate:"storage_retention:editable"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` RuleDir string `yaml:"rule_dir,omitempty" validate:"rule_dir:editable"` AdditionalScrapeConf map[string]any `yaml:"additional_scrape_conf,omitempty" validate:"additional_scrape_conf:ignore"` ScrapeInterval string `yaml:"scrape_interval,omitempty" validate:"scrape_interval:editable"` ScrapeTimeout string `yaml:"scrape_timeout,omitempty" validate:"scrape_timeout:editable"` AdditionalArgs []string `yaml:"additional_args,omitempty" validate:"additional_args:ignore"` } // Remote prometheus remote config type Remote struct { RemoteWrite []map[string]any `yaml:"remote_write,omitempty" validate:"remote_write:ignore"` RemoteRead []map[string]any `yaml:"remote_read,omitempty" validate:"remote_read:ignore"` } // ExternalAlertmanager configs prometheus to include alertmanagers not deployed in current cluster type ExternalAlertmanager struct { Host string 
`yaml:"host"` WebPort int `yaml:"web_port" default:"9093"` } // Role returns the component role of the instance func (s *PrometheusSpec) Role() string { return ComponentPrometheus } // SSH returns the host and SSH port of the instance func (s *PrometheusSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *PrometheusSpec) GetMainPort() int { return s.Port } // GetManageHost returns the manage host of the instance func (s *PrometheusSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *PrometheusSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *PrometheusSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // MonitorComponent represents Monitor component. type MonitorComponent struct{ Topology } // Name implements Component interface. func (c *MonitorComponent) Name() string { return ComponentPrometheus } // Role implements Component interface. func (c *MonitorComponent) Role() string { return RoleMonitor } // Source implements Component interface. func (c *MonitorComponent) Source() string { return ComponentPrometheus } // CalculateVersion implements the Component interface func (c *MonitorComponent) CalculateVersion(clusterVersion string) string { // always not follow cluster version, use ""(latest) by default version := c.Topology.BaseTopo().PrometheusVersion if version != nil && *version != "" { return *version } return clusterVersion } // SetVersion implements Component interface. func (c *MonitorComponent) SetVersion(version string) { *c.Topology.BaseTopo().PrometheusVersion = version } // Instances implements Component interface. 
func (c *MonitorComponent) Instances() []Instance { servers := c.BaseTopo().Monitors ins := make([]Instance, 0, len(servers)) for _, rs := range servers { s := rs ports := []int{ s.Port, } if mopts := c.GetMonitoredOptions(); mopts != nil { ports = append(ports, mopts.BlackboxExporterPort, mopts.NodeExporterPort) } mi := &MonitorInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, NumaNode: s.NumaNode, NumaCores: "", Ports: ports, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: func(_ context.Context, timeout time.Duration, _ *tls.Config, _ ...string) string { return statusByHost(s.GetManageHost(), s.Port, "/-/ready", timeout, nil) }, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, c.Topology} if s.NgPort > 0 { mi.BaseInstance.Ports = append(mi.BaseInstance.Ports, s.NgPort) } ins = append(ins, mi) } return ins } // MonitorInstance represent the monitor instance type MonitorInstance struct { BaseInstance topo Topology } // handleRemoteWrite handles remote write configuration for NG monitoring func (i *MonitorInstance) handleRemoteWrite(spec *PrometheusSpec, monitoring *PrometheusSpec) { // When PromRemoteWriteToVM is false, remove any VM remote write configurations if !spec.PromRemoteWriteToVM { // If there are no remote write configurations, nothing to do if len(spec.RemoteConfig.RemoteWrite) == 0 { return } // Filter out any remote write configurations pointing to the VM endpoint filteredRemoteWrite := make([]map[string]any, 0) for _, rw := range spec.RemoteConfig.RemoteWrite { if url, ok := rw["url"].(string); ok { // Keep only non-VM remote write configurations if !strings.Contains(url, fmt.Sprintf("%s/api/v1/write", utils.JoinHostPort(monitoring.Host, monitoring.NgPort))) { filteredRemoteWrite = 
append(filteredRemoteWrite, rw) } } else { // Keep entries without URL or with non-string URL (shouldn't happen normally) filteredRemoteWrite = append(filteredRemoteWrite, rw) } } spec.RemoteConfig.RemoteWrite = filteredRemoteWrite return } if monitoring.NgPort <= 0 { return } // monitor do not support tls for itself remoteWriteURL := fmt.Sprintf("http://%s/api/v1/write", utils.JoinHostPort(monitoring.Host, monitoring.NgPort)) // Check if this URL already exists in remote write configs urlExists := false if spec.RemoteConfig.RemoteWrite != nil { for _, rw := range spec.RemoteConfig.RemoteWrite { if url, ok := rw["url"].(string); ok && url == remoteWriteURL { urlExists = true break } } } if !urlExists { remoteWrite := map[string]any{ "url": remoteWriteURL, } if spec.RemoteConfig.RemoteWrite == nil { spec.RemoteConfig.RemoteWrite = []map[string]any{remoteWrite} } else { spec.RemoteConfig.RemoteWrite = append(spec.RemoteConfig.RemoteWrite, remoteWrite) } } } // InitConfig implement Instance interface func (i *MonitorInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { gOpts := *i.topo.BaseTopo().GlobalOptions if err := i.BaseInstance.InitConfig(ctx, e, gOpts, deployUser, paths); err != nil { return err } enableTLS := gOpts.TLSEnabled // transfer run script spec := i.InstanceSpec.(*PrometheusSpec) cfg := &scripts.PrometheusScript{ Port: spec.Port, WebExternalURL: fmt.Sprintf("http://%s", utils.JoinHostPort(spec.Host, spec.Port)), Retention: getRetention(spec.Retention), EnableNG: spec.NgPort > 0, EnablePromAgentMode: spec.EnablePromAgentMode, // Get from spec directly DeployDir: paths.Deploy, LogDir: paths.Log, DataDir: paths.Data[0], NumaNode: spec.NumaNode, AdditionalArgs: spec.AdditionalArgs, } // Check if agent mode is enabled in additional arguments if !cfg.EnablePromAgentMode { if slices.Contains(spec.AdditionalArgs, "--enable-feature=agent") { cfg.EnablePromAgentMode = true } } 
fp := filepath.Join(paths.Cache, fmt.Sprintf("run_prometheus_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_prometheus.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } topoHasField := func(field string) (reflect.Value, bool) { return findSliceField(i.topo, field) } monitoredOptions := i.topo.GetMonitoredOptions() // transfer config cfig := config.NewPrometheusConfig(clusterName, clusterVersion, enableTLS) if monitoredOptions != nil { cfig.AddBlackbox(i.GetHost(), uint64(monitoredOptions.BlackboxExporterPort)) } cfig.ScrapeInterval = spec.ScrapeInterval cfig.ScrapeTimeout = spec.ScrapeTimeout uniqueHosts := set.NewStringSet() if servers, found := topoHasField("PDServers"); found { for i := 0; i < servers.Len(); i++ { pd := servers.Index(i).Interface().(*PDSpec) uniqueHosts.Insert(pd.Host) cfig.AddPD(pd.Host, uint64(pd.ClientPort)) } } if servers, found := topoHasField("TSOServers"); found { for i := 0; i < servers.Len(); i++ { tso := servers.Index(i).Interface().(*TSOSpec) uniqueHosts.Insert(tso.Host) cfig.AddTSO(tso.Host, uint64(tso.Port)) } } if servers, found := topoHasField("SchedulingServers"); found { for i := 0; i < servers.Len(); i++ { scheduling := servers.Index(i).Interface().(*SchedulingSpec) uniqueHosts.Insert(scheduling.Host) cfig.AddScheduling(scheduling.Host, uint64(scheduling.Port)) } } if servers, found := topoHasField("TiKVServers"); found { for i := 0; i < servers.Len(); i++ { kv := servers.Index(i).Interface().(*TiKVSpec) uniqueHosts.Insert(kv.Host) cfig.AddTiKV(kv.Host, uint64(kv.StatusPort)) } } if servers, found := topoHasField("TiDBServers"); found { for i := 0; i < servers.Len(); i++ { db := servers.Index(i).Interface().(*TiDBSpec) uniqueHosts.Insert(db.Host) cfig.AddTiDB(db.Host, uint64(db.StatusPort)) } } if servers, found := 
topoHasField("TiProxyServers"); found { for i := 0; i < servers.Len(); i++ { db := servers.Index(i).Interface().(*TiProxySpec) uniqueHosts.Insert(db.Host) cfig.AddTiProxy(db.Host, uint64(db.StatusPort)) } } if servers, found := topoHasField("TiFlashServers"); found { for i := 0; i < servers.Len(); i++ { flash := servers.Index(i).Interface().(*TiFlashSpec) uniqueHosts.Insert(flash.Host) cfig.AddTiFlashLearner(flash.Host, uint64(flash.FlashProxyStatusPort)) cfig.AddTiFlash(flash.Host, uint64(flash.StatusPort)) } } if servers, found := topoHasField("PumpServers"); found { for i := 0; i < servers.Len(); i++ { pump := servers.Index(i).Interface().(*PumpSpec) uniqueHosts.Insert(pump.Host) cfig.AddPump(pump.Host, uint64(pump.Port)) } } if servers, found := topoHasField("Drainers"); found { for i := 0; i < servers.Len(); i++ { drainer := servers.Index(i).Interface().(*DrainerSpec) uniqueHosts.Insert(drainer.Host) cfig.AddDrainer(drainer.Host, uint64(drainer.Port)) } } if servers, found := topoHasField("CDCServers"); found { for i := 0; i < servers.Len(); i++ { cdc := servers.Index(i).Interface().(*CDCSpec) uniqueHosts.Insert(cdc.Host) cfig.AddCDC(cdc.Host, uint64(cdc.Port)) } } if servers, found := topoHasField("TiKVCDCServers"); found { for i := 0; i < servers.Len(); i++ { tikvCdc := servers.Index(i).Interface().(*TiKVCDCSpec) uniqueHosts.Insert(tikvCdc.Host) cfig.AddTiKVCDC(tikvCdc.Host, uint64(tikvCdc.Port)) } } if servers, found := topoHasField("Monitors"); found { for idx := 0; idx < servers.Len(); idx++ { monitoring := servers.Index(idx).Interface().(*PrometheusSpec) uniqueHosts.Insert(monitoring.Host) } } if servers, found := topoHasField("Grafanas"); found { for i := 0; i < servers.Len(); i++ { grafana := servers.Index(i).Interface().(*GrafanaSpec) uniqueHosts.Insert(grafana.Host) cfig.AddGrafana(grafana.Host, uint64(grafana.Port)) } } if servers, found := topoHasField("Alertmanagers"); found { for i := 0; i < servers.Len(); i++ { alertmanager := 
servers.Index(i).Interface().(*AlertmanagerSpec) uniqueHosts.Insert(alertmanager.Host) cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) } } if servers, found := topoHasField("Masters"); found { for i := 0; i < servers.Len(); i++ { master := reflect.Indirect(servers.Index(i)) host, port := master.FieldByName("Host").String(), master.FieldByName("Port").Int() uniqueHosts.Insert(host) cfig.AddDMMaster(host, uint64(port)) } } if servers, found := topoHasField("Workers"); found { for i := 0; i < servers.Len(); i++ { worker := reflect.Indirect(servers.Index(i)) host, port := worker.FieldByName("Host").String(), worker.FieldByName("Port").Int() uniqueHosts.Insert(host) cfig.AddDMWorker(host, uint64(port)) } } if monitoredOptions != nil { for host := range uniqueHosts { cfig.AddNodeExpoertor(host, uint64(monitoredOptions.NodeExporterPort)) cfig.AddBlackboxExporter(host, uint64(monitoredOptions.BlackboxExporterPort)) cfig.AddMonitoredServer(host) } } // doesn't work if _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil { return err } for _, alertmanager := range spec.ExternalAlertmanagers { cfig.AddAlertmanager(alertmanager.Host, uint64(alertmanager.WebPort)) } cfig.AddPushgateway(spec.PushgatewayAddrs) if spec.RuleDir != "" { filter := func(name string) bool { return strings.HasSuffix(name, ".rules.yml") } err := i.IteratorLocalConfigDir(ctx, spec.RuleDir, filter, func(name string) error { cfig.AddLocalRule(name) return nil }) if err != nil { return errors.Annotate(err, "add local rule") } } if err := i.installRules(ctx, e, paths.Deploy, clusterName, clusterVersion); err != nil { return errors.Annotate(err, "install rules") } if err := i.initRules(ctx, e, spec, paths, clusterName); err != nil { return err } if spec.NgPort > 0 { pds := []string{} if servers, found := topoHasField("PDServers"); found { for i := 0; i < servers.Len(); i++ { pd := servers.Index(i).Interface().(*PDSpec) pds = append(pds, fmt.Sprintf("\"%s\"", 
utils.JoinHostPort(pd.Host, pd.ClientPort))) } } ngcfg := &config.NgMonitoringConfig{ ClusterName: clusterName, Address: utils.JoinHostPort(i.GetListenHost(), spec.NgPort), AdvertiseAddress: utils.JoinHostPort(i.GetHost(), spec.NgPort), PDAddrs: strings.Join(pds, ","), TLSEnabled: enableTLS, DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, } if servers, found := topoHasField("Monitors"); found { for idx := 0; idx < servers.Len(); idx++ { monitoring := servers.Index(idx).Interface().(*PrometheusSpec) cfig.AddNGMonitoring(monitoring.Host, uint64(monitoring.NgPort)) i.handleRemoteWrite(spec, monitoring) } } fp = filepath.Join(paths.Cache, fmt.Sprintf("ngmonitoring_%s_%d.toml", i.GetHost(), i.GetPort())) if err := ngcfg.ConfigToFile(fp); err != nil { return err } dst = filepath.Join(paths.Deploy, "conf", "ngmonitoring.toml") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } } // set remote config remoteCfg, err := encodeRemoteCfg2Yaml(spec.RemoteConfig) if err != nil { return err } cfig.SetRemoteConfig(string(remoteCfg)) fp = filepath.Join(paths.Cache, fmt.Sprintf("prometheus_%s_%d.yml", i.GetHost(), i.GetPort())) // Generate config file with agent mode consideration if spec.EnablePromAgentMode { // Use agent mode configuration (without rule_files section) configBytes, err := cfig.ConfigWithAgentMode(true) if err != nil { return err } if err := utils.WriteFile(fp, configBytes, 0644); err != nil { return err } } else { // Use normal configuration if err := cfig.ConfigToFile(fp); err != nil { return err } } if spec.AdditionalScrapeConf != nil { err = mergeAdditionalScrapeConf(fp, spec.AdditionalScrapeConf) if err != nil { return err } } dst = filepath.Join(paths.Deploy, "conf", "prometheus.yml") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), clusterVersion, i.OS(), i.Arch(), i.ComponentName()+".yml", paths) } // setTLSConfig 
set TLS Config to support enable/disable TLS func (i *MonitorInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } // We only really installRules for dm cluster because the rules(*.rules.yml) packed with the prometheus // component is designed for tidb cluster (the dm cluster use the same prometheus component with tidb // cluster), and the rules for dm cluster is packed in the dm-master component. So if deploying tidb // cluster, the rules is correct, if deploying dm cluster, we should remove rules for tidb and install // rules for dm. func (i *MonitorInstance) installRules(ctx context.Context, e ctxt.Executor, deployDir, clusterName, clusterVersion string) error { if i.topo.Type() != TopoTypeDM { return nil } tmp := filepath.Join(deployDir, "_tiup_tmp") _, stderr, err := e.Execute(ctx, fmt.Sprintf("mkdir -p %s", tmp), false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } srcPath := PackagePath(GetDMMasterPackageName(i.topo), clusterVersion, i.OS(), i.Arch()) dstPath := filepath.Join(tmp, filepath.Base(srcPath)) err = e.Transfer(ctx, srcPath, dstPath, false, 0, false) if err != nil { return err } cmd := fmt.Sprintf(`tar --no-same-owner -zxf %s -C %s && rm %s`, dstPath, tmp, dstPath) _, stderr, err = e.Execute(ctx, cmd, false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } // copy dm-master/conf/*.rules.yml targetDir := filepath.Join(deployDir, "bin", "prometheus") cmds := []string{ "mkdir -p %[1]s", `find %[1]s -type f -name "*.rules.yml" -delete`, `find %[2]s/dm-master/conf -type f -name "*.rules.yml" -exec cp {} %[1]s \;`, "rm -rf %[2]s", `find %[1]s -maxdepth 1 -type f -name "*.rules.yml" -exec sed -i 's/ENV_LABELS_ENV/%[3]s/g' {} \;`, } _, stderr, err = e.Execute(ctx, fmt.Sprintf(strings.Join(cmds, " && "), targetDir, tmp, clusterName), false) if err != nil { return errors.Annotatef(err, "stderr: %s", 
string(stderr)) } return nil } func (i *MonitorInstance) initRules(ctx context.Context, e ctxt.Executor, spec *PrometheusSpec, paths meta.DirPaths, clusterName string) error { // To make this step idempotent, we need cleanup old rules first cmds := []string{ "mkdir -p %[1]s/conf", `find %[1]s/conf -type f -name "*.rules.yml" -delete`, `find %[1]s/bin/prometheus -maxdepth 1 -type f -name "*.rules.yml" -exec cp {} %[1]s/conf/ \;`, `find %[1]s/conf -maxdepth 1 -type f -name "*.rules.yml" -exec sed -i -e 's/ENV_LABELS_ENV/%[2]s/g' {} \;`, } _, stderr, err := e.Execute(ctx, fmt.Sprintf(strings.Join(cmds, " && "), paths.Deploy, clusterName), false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } // render cluster name when monitoring_servers.rule_dir is set if spec.RuleDir != "" { err := i.TransferLocalConfigDir(ctx, e, spec.RuleDir, path.Join(paths.Deploy, "conf"), func(name string) bool { return strings.HasSuffix(name, ".rules.yml") }) if err != nil { return err } // only need to render the cluster name cmds = []string{ `find %[1]s/conf -maxdepth 1 -type f -name "*.rules.yml" -exec sed -i -e 's/env: [^ ]*/env: %[2]s/g' {} \;`, `find %[1]s/conf -maxdepth 1 -type f -name "*.rules.yml" -exec sed -i -e 's/cluster: [^ ]*,/cluster: %[2]s,/g' {} \;`, } _, stderr, err := e.Execute(ctx, fmt.Sprintf(strings.Join(cmds, " && "), paths.Deploy, clusterName), false) if err != nil { return errors.Annotatef(err, "stderr: %s", string(stderr)) } } return nil } // ScaleConfig deploy temporary config on scaling func (i *MonitorInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName string, clusterVersion string, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = topo return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } func mergeAdditionalScrapeConf(source string, addition map[string]any) error { var result map[string]any bytes, err := os.ReadFile(source) 
if err != nil { return err } err = yaml.Unmarshal(bytes, &result) if err != nil { return err } for _, job := range result["scrape_configs"].([]any) { for k, v := range addition { job.(map[string]any)[k] = v } } bytes, err = yaml.Marshal(result) if err != nil { return err } return utils.WriteFile(source, bytes, 0644) } func getRetention(retention string) string { valid, _ := regexp.MatchString("^[1-9]\\d*d$", retention) if retention == "" || !valid { return "30d" } return retention } tiup-1.16.3/pkg/cluster/spec/monitoring_test.go000066400000000000000000000275701505422223000215400ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec import ( "context" "fmt" "os" "os/user" "path" "path/filepath" "testing" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" "github.com/stretchr/testify/assert" "gopkg.in/yaml.v3" ) func TestLocalRuleDirs(t *testing.T) { deployDir, err := os.MkdirTemp("", "tiup-*") assert.Nil(t, err) defer os.RemoveAll(deployDir) err = utils.MkdirAll(path.Join(deployDir, "bin/prometheus"), 0755) assert.Nil(t, err) localDir, err := filepath.Abs("./testdata/rules") assert.Nil(t, err) err = os.WriteFile(path.Join(deployDir, "bin/prometheus", "dummy.rules.yml"), []byte("dummy"), 0644) assert.Nil(t, err) topo := new(Specification) topo.Monitors = append(topo.Monitors, &PrometheusSpec{ Host: "127.0.0.1", Port: 9090, RuleDir: localDir, }) comp := MonitorComponent{topo} ints := comp.Instances() assert.Equal(t, len(ints), 1) promInstance := ints[0].(*MonitorInstance) assert.Contains(t, promInstance.Ports, topo.GetMonitoredOptions().NodeExporterPort) assert.Contains(t, promInstance.Ports, topo.GetMonitoredOptions().BlackboxExporterPort) user, err := user.Current() assert.Nil(t, err) e, err := executor.New(executor.SSHTypeNone, false, executor.SSHConfig{Host: "127.0.0.1", User: user.Username}) assert.Nil(t, err) ctx := checkpoint.NewContext(context.Background()) err = promInstance.initRules(ctx, e, promInstance.InstanceSpec.(*PrometheusSpec), meta.DirPaths{Deploy: deployDir}, "dummy-cluster") assert.Nil(t, err) assert.FileExists(t, path.Join(deployDir, "conf", "dummy.rules.yml")) fs, err := os.ReadDir(localDir) assert.Nil(t, err) for _, f := range fs { assert.FileExists(t, path.Join(deployDir, "conf", f.Name())) } } func TestNoLocalRuleDirs(t *testing.T) { deployDir, err := os.MkdirTemp("", "tiup-*") assert.Nil(t, err) defer os.RemoveAll(deployDir) err = utils.MkdirAll(path.Join(deployDir, "bin/prometheus"), 0755) assert.Nil(t, err) localDir, err := 
filepath.Abs("./testdata/rules") assert.Nil(t, err) err = os.WriteFile(path.Join(deployDir, "bin/prometheus", "dummy.rules.yml"), []byte(` groups: - name: alert.rules rules: - alert: TiDB_schema_error expr: increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 for: 1m labels: env: ENV_LABELS_ENV level: emergency expr: increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 annotations: description: "cluster: ENV_LABELS_ENV, instance: {{ $labels.instance }}, values:{{ $value }}" value: "{{ $value }}" summary: TiDB schema error `), 0644) assert.Nil(t, err) topo := new(Specification) topo.Monitors = append(topo.Monitors, &PrometheusSpec{ Host: "127.0.0.1", Port: 9090, }) comp := MonitorComponent{topo} ints := comp.Instances() assert.Equal(t, len(ints), 1) promInstance := ints[0].(*MonitorInstance) user, err := user.Current() assert.Nil(t, err) e, err := executor.New(executor.SSHTypeNone, false, executor.SSHConfig{Host: "127.0.0.1", User: user.Username}) assert.Nil(t, err) ctx := checkpoint.NewContext(context.Background()) err = promInstance.initRules(ctx, e, promInstance.InstanceSpec.(*PrometheusSpec), meta.DirPaths{Deploy: deployDir}, "dummy-cluster") assert.Nil(t, err) body, err := os.ReadFile(path.Join(deployDir, "conf", "dummy.rules.yml")) assert.Nil(t, err) assert.Contains(t, string(body), "dummy-cluster") assert.NotContains(t, string(body), "ENV_LABELS_ENV") assert.FileExists(t, path.Join(deployDir, "conf", "dummy.rules.yml")) fs, err := os.ReadDir(localDir) assert.Nil(t, err) for _, f := range fs { assert.NoFileExists(t, path.Join(deployDir, "conf", f.Name())) } } func TestMergeAdditionalScrapeConf(t *testing.T) { file, err := os.CreateTemp("", "tiup-cluster-spec-test") if err != nil { panic(fmt.Sprintf("create temp file: %s", err)) } defer os.Remove(file.Name()) _, err = file.WriteString(`--- global: scrape_interval: 15s # By default, scrape targets every 15 seconds. 
evaluation_interval: 15s # By default, scrape targets every 15 seconds. # scrape_timeout is set to the global default (10s). external_labels: cluster: 'test' monitor: "prometheus" scrape_configs: - job_name: "tidb" honor_labels: true # don't overwrite job & instance labels static_configs: - targets: - '192.168.122.215:10080' - job_name: "tikv" honor_labels: true # don't overwrite job & instance labels static_configs: - targets: - '192.168.122.25:20180'`) assert.Nil(t, err) expected := `global: evaluation_interval: 15s external_labels: cluster: test monitor: prometheus scrape_interval: 15s scrape_configs: - honor_labels: true job_name: tidb metric_relabel_configs: - action: drop regex: tikv_thread_nonvoluntary_context_switches|tikv_thread_voluntary_context_switches|tikv_threads_io_bytes_total separator: ; source_labels: - __name__ - action: drop regex: tikv_thread_cpu_seconds_total;(tokio|rocksdb).+ separator: ; source_labels: - __name__ - name static_configs: - targets: - 192.168.122.215:10080 - honor_labels: true job_name: tikv metric_relabel_configs: - action: drop regex: tikv_thread_nonvoluntary_context_switches|tikv_thread_voluntary_context_switches|tikv_threads_io_bytes_total separator: ; source_labels: - __name__ - action: drop regex: tikv_thread_cpu_seconds_total;(tokio|rocksdb).+ separator: ; source_labels: - __name__ - name static_configs: - targets: - 192.168.122.25:20180 ` var addition map[string]any err = yaml.Unmarshal([]byte(`metric_relabel_configs: - source_labels: [__name__] separator: ; regex: tikv_thread_nonvoluntary_context_switches|tikv_thread_voluntary_context_switches|tikv_threads_io_bytes_total action: drop - source_labels: [__name__,name] separator: ; regex: tikv_thread_cpu_seconds_total;(tokio|rocksdb).+ action: drop`), &addition) assert.Nil(t, err) err = mergeAdditionalScrapeConf(file.Name(), addition) assert.Nil(t, err) result, err := os.ReadFile(file.Name()) assert.Nil(t, err) assert.Equal(t, expected, string(result)) } func 
TestGetRetention(t *testing.T) { var val string val = getRetention("-1d") assert.EqualValues(t, "30d", val) val = getRetention("0d") assert.EqualValues(t, "30d", val) val = getRetention("01d") assert.EqualValues(t, "30d", val) val = getRetention("1dd") assert.EqualValues(t, "30d", val) val = getRetention("*1d") assert.EqualValues(t, "30d", val) val = getRetention("1d ") assert.EqualValues(t, "30d", val) val = getRetention("ddd") assert.EqualValues(t, "30d", val) val = getRetention("60d") assert.EqualValues(t, "60d", val) val = getRetention("999d") assert.EqualValues(t, "999d", val) } // TestHandleRemoteWrite verifies that remote write configurations are properly handled func TestHandleRemoteWrite(t *testing.T) { // Create spec and monitoring instances spec := &PrometheusSpec{ Host: "192.168.1.10", Port: 9090, PromRemoteWriteToVM: true, } monitoring := &PrometheusSpec{ Host: "192.168.1.20", NgPort: 12020, } // Set up expected remote write URL expectedURL := fmt.Sprintf("http://%s/api/v1/write", utils.JoinHostPort(monitoring.Host, monitoring.NgPort)) monitorInstance := &MonitorInstance{ BaseInstance: BaseInstance{ InstanceSpec: spec, Host: spec.Host, Port: spec.Port, SSHP: 22, }, } // Execute handleRemoteWrite monitorInstance.handleRemoteWrite(spec, monitoring) // Check remote write config was added assert.Len(t, spec.RemoteConfig.RemoteWrite, 1) assert.Equal(t, expectedURL, spec.RemoteConfig.RemoteWrite[0]["url"]) // Add the same remote write URL again monitorInstance.handleRemoteWrite(spec, monitoring) // Check that no duplicate remote write config was added assert.Len(t, spec.RemoteConfig.RemoteWrite, 1) assert.Equal(t, expectedURL, spec.RemoteConfig.RemoteWrite[0]["url"]) } // TestPromRemoteWriteToVM tests remote write configuration func TestPromRemoteWriteToVM(t *testing.T) { // Create a PrometheusSpec with PromRemoteWriteToVM spec := PrometheusSpec{ Host: "127.0.0.1", Port: 9090, PromRemoteWriteToVM: true, } // Validate field is accessible assert.True(t, 
spec.PromRemoteWriteToVM) // Test setting field spec.PromRemoteWriteToVM = false assert.False(t, spec.PromRemoteWriteToVM) } // TestVMRemoteWriteYAMLBackwardsCompatibility tests loading YAML with and without PromRemoteWriteToVM field func TestVMRemoteWriteYAMLBackwardsCompatibility(t *testing.T) { // Old YAML without PromRemoteWriteToVM oldYAML := ` host: 127.0.0.1 port: 9090 ng_port: 12020 ` // New YAML with PromRemoteWriteToVM newYAML := ` host: 127.0.0.1 port: 9090 ng_port: 12020 prom_remote_write_to_vm: true ` // Test unmarshaling old YAML var oldSpec PrometheusSpec err := yaml.Unmarshal([]byte(oldYAML), &oldSpec) assert.NoError(t, err) // Default value should be false assert.False(t, oldSpec.PromRemoteWriteToVM) // Test unmarshaling new YAML var newSpec PrometheusSpec err = yaml.Unmarshal([]byte(newYAML), &newSpec) assert.NoError(t, err) // New value should match what's in the YAML assert.True(t, newSpec.PromRemoteWriteToVM) } // TestHandleRemoteWriteDisabled tests that VM remote write configuration is removed when PromRemoteWriteToVM is false func TestHandleRemoteWriteDisabled(t *testing.T) { // Create spec with existing remote write config and PromRemoteWriteToVM=false spec := &PrometheusSpec{ Host: "192.168.1.10", Port: 9090, PromRemoteWriteToVM: false, } monitoring := &PrometheusSpec{ Host: "192.168.1.20", NgPort: 12020, } // Add VM remote write URL vmURL := fmt.Sprintf("http://%s/api/v1/write", utils.JoinHostPort(monitoring.Host, monitoring.NgPort)) spec.RemoteConfig.RemoteWrite = []map[string]any{ {"url": vmURL}, } // Add another remote write URL that should be preserved otherURL := "http://some-other-target:9090/api/v1/write" spec.RemoteConfig.RemoteWrite = append(spec.RemoteConfig.RemoteWrite, map[string]any{ "url": otherURL, }) monitorInstance := &MonitorInstance{ BaseInstance: BaseInstance{ InstanceSpec: spec, Host: spec.Host, Port: spec.Port, SSHP: 22, }, } // Execute handleRemoteWrite with PromRemoteWriteToVM=false 
monitorInstance.handleRemoteWrite(spec, monitoring) // Check that VM remote write config was removed but other config was preserved assert.Len(t, spec.RemoteConfig.RemoteWrite, 1) assert.Equal(t, otherURL, spec.RemoteConfig.RemoteWrite[0]["url"]) // Test with no remote write configs spec.RemoteConfig.RemoteWrite = nil monitorInstance.handleRemoteWrite(spec, monitoring) // No remote write configs should still be nil/empty assert.Empty(t, spec.RemoteConfig.RemoteWrite) // Now test with PromRemoteWriteToVM toggled back to true spec.PromRemoteWriteToVM = true monitorInstance.handleRemoteWrite(spec, monitoring) // VM remote write config should be added back assert.Len(t, spec.RemoteConfig.RemoteWrite, 1) assert.Equal(t, vmURL, spec.RemoteConfig.RemoteWrite[0]["url"]) } tiup-1.16.3/pkg/cluster/spec/parse_topology.go000066400000000000000000000117011505422223000213470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec import ( "bytes" "os" "path" "reflect" "strings" "github.com/joomcode/errorx" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" "gopkg.in/yaml.v3" ) var ( defaultDeployUser = "tidb" errNSTopolohy = errorx.NewNamespace("topology") // ErrTopologyReadFailed is ErrTopologyReadFailed ErrTopologyReadFailed = errNSTopolohy.NewType("read_failed", utils.ErrTraitPreCheck) // ErrTopologyParseFailed is ErrTopologyParseFailed ErrTopologyParseFailed = errNSTopolohy.NewType("parse_failed", utils.ErrTraitPreCheck) ) // ReadYamlFile read yaml content from file` func ReadYamlFile(file string) ([]byte, error) { suggestionProps := map[string]string{ "File": file, } yamlFile, err := os.ReadFile(file) if err != nil { return nil, ErrTopologyReadFailed. Wrap(err, "Failed to read topology file %s", file). WithProperty(tui.SuggestionFromTemplate(` Please check whether your topology file {{ColorKeyword}}{{.File}}{{ColorReset}} exists and try again. To generate a sample topology file: {{ColorCommand}}{{OsArgs0}} template topology > topo.yaml{{ColorReset}} `, suggestionProps)) } return yamlFile, nil } // ParseTopologyYaml read yaml content from `file` and unmarshal it to `out` // ignoreGlobal ignore global variables in file, only ignoreGlobal with a index of 0 is effective func ParseTopologyYaml(file string, out Topology, ignoreGlobal ...bool) error { suggestionProps := map[string]string{ "File": file, } zap.L().Debug("Parse topology file", zap.String("file", file)) yamlFile, err := ReadYamlFile(file) if err != nil { return err } // keep the global config in out if len(ignoreGlobal) > 0 && ignoreGlobal[0] { var newTopo map[string]any if err := yaml.Unmarshal(yamlFile, &newTopo); err != nil { return err } for k := range newTopo { switch k { case "global", "monitored", "server_configs": delete(newTopo, k) } } yamlFile, _ = yaml.Marshal(newTopo) } decoder := yaml.NewDecoder(bytes.NewReader(yamlFile)) decoder.KnownFields(true) if err = 
decoder.Decode(out); err != nil { return ErrTopologyParseFailed. Wrap(err, "Failed to parse topology file %s", file). WithProperty(tui.SuggestionFromTemplate(` Please check the syntax of your topology file {{ColorKeyword}}{{.File}}{{ColorReset}} and try again. `, suggestionProps)) } zap.L().Debug("Parse topology file succeeded", zap.Any("topology", out)) return nil } // ExpandRelativeDir fill DeployDir, DataDir and LogDir to absolute path func ExpandRelativeDir(topo Topology) { expandRelativePath(deployUser(topo), topo) } func expandRelativePath(user string, topo any) { v := reflect.Indirect(reflect.ValueOf(topo).Elem()) switch v.Kind() { case reflect.Slice: for i := 0; i < v.Len(); i++ { ref := reflect.New(v.Index(i).Type()) ref.Elem().Set(v.Index(i)) expandRelativePath(user, ref.Interface()) v.Index(i).Set(ref.Elem()) } case reflect.Struct: // We should deal with DeployDir first, because DataDir and LogDir depends on it dirs := []string{"DeployDir", "DataDir", "LogDir"} for _, dir := range dirs { f := v.FieldByName(dir) if !f.IsValid() || f.String() == "" { continue } switch dir { case "DeployDir": f.SetString(Abs(user, f.String())) case "DataDir": // Some components supports multiple data dirs split by comma ds := strings.Split(f.String(), ",") ads := []string{} for _, d := range ds { if strings.HasPrefix(d, "/") { ads = append(ads, d) } else { ads = append(ads, path.Join(v.FieldByName("DeployDir").String(), d)) } } f.SetString(strings.Join(ads, ",")) case "LogDir": if !strings.HasPrefix(f.String(), "/") { f.SetString(path.Join(v.FieldByName("DeployDir").String(), f.String())) } } } // Deal with all fields (expandRelativePath will do nothing on string filed) for i := 0; i < v.NumField(); i++ { // We don't deal with GlobalOptions because relative path in GlobalOptions.Data has special meaning if v.Type().Field(i).Name == "GlobalOptions" { continue } ref := reflect.New(v.Field(i).Type()) ref.Elem().Set(v.Field(i)) expandRelativePath(user, ref.Interface()) 
v.Field(i).Set(ref.Elem()) } case reflect.Ptr: expandRelativePath(user, v.Interface()) } } func deployUser(topo Topology) string { base := topo.BaseTopo() if base.GlobalOptions == nil || base.GlobalOptions.User == "" { return defaultDeployUser } return base.GlobalOptions.User } tiup-1.16.3/pkg/cluster/spec/parse_topology_test.go000066400000000000000000000465141505422223000224200ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "os" "path/filepath" "testing" "github.com/stretchr/testify/require" ) func withTempFile(t *testing.T, content string, fn func(string)) { file, err := os.CreateTemp("/tmp", "topology-test") require.NoError(t, err) defer os.Remove(file.Name()) _, err = file.WriteString(content) require.NoError(t, err) file.Close() fn(file.Name()) } func with2TempFile(t *testing.T, content1, content2 string, fn func(string, string)) { withTempFile(t, content1, func(file1 string) { withTempFile(t, content2, func(file2 string) { fn(file1, file2) }) }) } func TestParseTopologyYaml(t *testing.T) { file := filepath.Join("testdata", "topology_err.yaml") topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) } func TestParseTopologyYamlIgnoreGlobal(t *testing.T) { file := filepath.Join("testdata", "topology_err.yaml") topo := Specification{} err := ParseTopologyYaml(file, &topo, true) if topo.GlobalOptions.DeployDir == "/tidb/deploy" { t.Error("Can not ignore global variables") } require.NoError(t, err) } func TestRelativePath(t 
*testing.T) { // test relative path withTempFile(t, ` tikv_servers: - host: 172.16.5.140 deploy_dir: my-deploy `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/my-deploy", topo.TiKVServers[0].DeployDir) }) // test data dir & log dir withTempFile(t, ` tikv_servers: - host: 172.16.5.140 deploy_dir: my-deploy data_dir: my-data log_dir: my-log `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/my-deploy", topo.TiKVServers[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/my-data", topo.TiKVServers[0].DataDir) require.Equal(t, "/home/tidb/my-deploy/my-log", topo.TiKVServers[0].LogDir) }) // test global options, case 1 withTempFile(t, ` global: deploy_dir: my-deploy tikv_servers: - host: 172.16.5.140 `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "my-deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20160", topo.TiKVServers[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20160/data", topo.TiKVServers[0].DataDir) }) // test global options, case 2 withTempFile(t, ` global: deploy_dir: my-deploy tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 - host: 172.16.5.140 port: 20161 status_port: 20181 `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "my-deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20160", topo.TiKVServers[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20160/data", topo.TiKVServers[0].DataDir) require.Equal(t, 
"/home/tidb/my-deploy/tikv-20161", topo.TiKVServers[1].DeployDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20161/data", topo.TiKVServers[1].DataDir) }) // test global options, case 3 withTempFile(t, ` global: deploy_dir: my-deploy tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 data_dir: my-data log_dir: my-log - host: 172.16.5.140 port: 20161 status_port: 20181 `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) require.Equal(t, "my-deploy/monitor-9100", topo.MonitoredOptions.DeployDir) require.Equal(t, "data/monitor-9100", topo.MonitoredOptions.DataDir) require.Equal(t, "my-deploy/monitor-9100/log", topo.MonitoredOptions.LogDir) ExpandRelativeDir(&topo) require.Equal(t, "my-deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "/home/tidb/my-deploy/monitor-9100", topo.MonitoredOptions.DeployDir) require.Equal(t, "/home/tidb/my-deploy/monitor-9100/data/monitor-9100", topo.MonitoredOptions.DataDir) require.Equal(t, "/home/tidb/my-deploy/monitor-9100/my-deploy/monitor-9100/log", topo.MonitoredOptions.LogDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20160", topo.TiKVServers[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20160/my-data", topo.TiKVServers[0].DataDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20160/my-log", topo.TiKVServers[0].LogDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20161", topo.TiKVServers[1].DeployDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20161/data", topo.TiKVServers[1].DataDir) require.Equal(t, "/home/tidb/my-deploy/tikv-20161/log", topo.TiKVServers[1].LogDir) }) // test global options, case 4 withTempFile(t, ` global: data_dir: my-global-data log_dir: my-global-log tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 data_dir: my-local-data log_dir: my-local-log - host: 172.16.5.140 port: 20161 status_port: 20181 `, func(file string) { topo := Specification{} err := 
ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "my-global-data", topo.GlobalOptions.DataDir) require.Equal(t, "my-global-log", topo.GlobalOptions.LogDir) require.Equal(t, "/home/tidb/deploy/monitor-9100", topo.MonitoredOptions.DeployDir) require.Equal(t, "/home/tidb/deploy/monitor-9100/my-global-data/monitor-9100", topo.MonitoredOptions.DataDir) require.Equal(t, "/home/tidb/deploy/monitor-9100/deploy/monitor-9100/log", topo.MonitoredOptions.LogDir) require.Equal(t, "/home/tidb/deploy/tikv-20160", topo.TiKVServers[0].DeployDir) require.Equal(t, "/home/tidb/deploy/tikv-20160/my-local-data", topo.TiKVServers[0].DataDir) require.Equal(t, "/home/tidb/deploy/tikv-20160/my-local-log", topo.TiKVServers[0].LogDir) require.Equal(t, "/home/tidb/deploy/tikv-20161", topo.TiKVServers[1].DeployDir) require.Equal(t, "/home/tidb/deploy/tikv-20161/my-global-data", topo.TiKVServers[1].DataDir) require.Equal(t, "/home/tidb/deploy/tikv-20161/my-global-log", topo.TiKVServers[1].LogDir) }) // test multiple dir, case 5 withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /path/to/my-first-data,my-second-data `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/path/to/my-first-data,/home/tidb/deploy/tiflash-9000/my-second-data", topo.TiFlashServers[0].DataDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000/log", topo.TiFlashServers[0].LogDir) }) // test global options, case 6 withTempFile(t, ` global: user: test data_dir: my-global-data log_dir: my-global-log tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 deploy_dir: my-local-deploy data_dir: my-local-data log_dir: my-local-log - host: 172.16.5.140 port: 20161 status_port: 20181 `, func(file string) { topo := 
Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "my-global-data", topo.GlobalOptions.DataDir) require.Equal(t, "my-global-log", topo.GlobalOptions.LogDir) require.Equal(t, "/home/test/my-local-deploy", topo.TiKVServers[0].DeployDir) require.Equal(t, "/home/test/my-local-deploy/my-local-data", topo.TiKVServers[0].DataDir) require.Equal(t, "/home/test/my-local-deploy/my-local-log", topo.TiKVServers[0].LogDir) require.Equal(t, "/home/test/deploy/tikv-20161", topo.TiKVServers[1].DeployDir) require.Equal(t, "/home/test/deploy/tikv-20161/my-global-data", topo.TiKVServers[1].DataDir) require.Equal(t, "/home/test/deploy/tikv-20161/my-global-log", topo.TiKVServers[1].LogDir) }) } func TestTiFlashStorage(t *testing.T) { // test tiflash storage section, 'storage.main.dir' should not be defined in server_configs withTempFile(t, ` server_configs: tiflash: storage.main.dir: [/data1/tiflash] tiflash_servers: - host: 172.16.5.140 `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.Error(t, err) }) // test tiflash storage section, 'storage.latest.dir' should not be defined in server_configs withTempFile(t, ` server_configs: tiflash: storage.latest.dir: [/data1/tiflash] tiflash_servers: - host: 172.16.5.140 `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.Error(t, err) }) // test tiflash storage section defined data dir // test for deprecated setting, for backward compatibility withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /ssd0/tiflash config: `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/ssd0/tiflash", topo.TiFlashServers[0].DataDir) require.Equal(t, 
"/home/tidb/deploy/tiflash-9000/log", topo.TiFlashServers[0].LogDir) }) // test tiflash storage section defined data dir withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /ssd0/tiflash,/ssd1/tiflash,/ssd2/tiflash config: storage.main.dir: [/ssd0/tiflash, /ssd1/tiflash, /ssd2/tiflash] storage.latest.dir: [/ssd0/tiflash, /ssd1/tiflash, /ssd2/tiflash] `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/ssd0/tiflash,/ssd1/tiflash,/ssd2/tiflash", topo.TiFlashServers[0].DataDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000/log", topo.TiFlashServers[0].LogDir) }) // test tiflash storage section defined data dir, "data_dir" will be ignored withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 # if storage.main.dir is defined, data_dir will be ignored data_dir: /hdd0/tiflash config: storage.main.dir: [/ssd0/tiflash, /ssd1/tiflash, /ssd2/tiflash] `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/ssd0/tiflash,/ssd1/tiflash,/ssd2/tiflash", topo.TiFlashServers[0].DataDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000/log", topo.TiFlashServers[0].LogDir) }) // test tiflash storage section defined data dir // if storage.latest.dir is not empty, the first path in // storage.latest.dir will be the first path in 'DataDir' // DataDir is the union set of storage.latest.dir and storage.main.dir withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /ssd0/tiflash config: storage.main.dir: [/hdd0/tiflash, /hdd1/tiflash, /hdd2/tiflash] storage.latest.dir: [/ssd0/tiflash, /ssd1/tiflash, /ssd2/tiflash, /hdd0/tiflash] `, func(file string) { topo := Specification{} err := 
ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/ssd0/tiflash,/hdd0/tiflash,/hdd1/tiflash,/hdd2/tiflash,/ssd1/tiflash,/ssd2/tiflash", topo.TiFlashServers[0].DataDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000/log", topo.TiFlashServers[0].LogDir) }) // test if there is only one path in storage.main.dir withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /hhd0/tiflash config: storage.main.dir: [/ssd0/tiflash] `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) ExpandRelativeDir(&topo) require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/ssd0/tiflash", topo.TiFlashServers[0].DataDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000/log", topo.TiFlashServers[0].LogDir) }) // test tiflash storage.latest section defined data dir // should always define storage.main.dir if 'storage.latest' is defined withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /ssd0/tiflash config: #storage.main.dir: [/hdd0/tiflash, /hdd1/tiflash, /hdd2/tiflash] storage.latest.dir: [/ssd0/tiflash, /ssd1/tiflash, /ssd2/tiflash, /hdd0/tiflash] `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.Error(t, err) }) // test tiflash storage.raft section defined data dir // should always define storage.main.dir if 'storage.raft' is defined withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /ssd0/tiflash config: #storage.main.dir: [/hdd0/tiflash, /hdd1/tiflash, /hdd2/tiflash] storage.raft.dir: [/ssd0/tiflash, /ssd1/tiflash, /ssd2/tiflash, /hdd0/tiflash] `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.Error(t, err) }) // test tiflash storage.remote section defined data dir // should be fine even when `storage.main.dir` is not defined. 
withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /ssd0/tiflash config: storage.remote.dir: /tmp/tiflash/remote `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.NoError(t, err) }) // test tiflash storage section defined data dir // storage.main.dir should always use absolute path withTempFile(t, ` tiflash_servers: - host: 172.16.5.140 data_dir: /ssd0/tiflash config: storage.main.dir: [tiflash/data, ] storage.latest.dir: [/ssd0/tiflash, /ssd1/tiflash, /ssd2/tiflash, /hdd0/tiflash] `, func(file string) { topo := Specification{} err := ParseTopologyYaml(file, &topo) require.Error(t, err) }) } func merge4test(base, scale string) (*Specification, error) { baseTopo := Specification{} if err := ParseTopologyYaml(base, &baseTopo); err != nil { return nil, err } scaleTopo := baseTopo.NewPart() if err := ParseTopologyYaml(scale, scaleTopo); err != nil { return nil, err } mergedTopo := baseTopo.MergeTopo(scaleTopo) if err := mergedTopo.Validate(); err != nil { return nil, err } return mergedTopo.(*Specification), nil } func TestTopologyMerge(t *testing.T) { // base test with2TempFile(t, ` tiflash_servers: - host: 172.16.5.140 `, ` tiflash_servers: - host: 172.16.5.139 `, func(base, scale string) { topo, err := merge4test(base, scale) require.NoError(t, err) ExpandRelativeDir(topo) ExpandRelativeDir(topo) // should be idempotent require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000/data", topo.TiFlashServers[0].DataDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000", topo.TiFlashServers[1].DeployDir) require.Equal(t, "/home/tidb/deploy/tiflash-9000/data", topo.TiFlashServers[1].DataDir) }) // test global option overwrite with2TempFile(t, ` global: user: test deploy_dir: /my-global-deploy tiflash_servers: - host: 172.16.5.140 log_dir: my-local-log-tiflash data_dir: my-local-data-tiflash - host: 172.16.5.175 deploy_dir: flash-deploy - 
host: 172.16.5.141 `, ` tiflash_servers: - host: 172.16.5.139 deploy_dir: flash-deploy - host: 172.16.5.134 `, func(base, scale string) { topo, err := merge4test(base, scale) require.NoError(t, err) ExpandRelativeDir(topo) require.Equal(t, "/my-global-deploy/tiflash-9000", topo.TiFlashServers[0].DeployDir) require.Equal(t, "/my-global-deploy/tiflash-9000/my-local-data-tiflash", topo.TiFlashServers[0].DataDir) require.Equal(t, "/my-global-deploy/tiflash-9000/my-local-log-tiflash", topo.TiFlashServers[0].LogDir) require.Equal(t, "/home/test/flash-deploy", topo.TiFlashServers[1].DeployDir) require.Equal(t, "/home/test/flash-deploy/data", topo.TiFlashServers[1].DataDir) require.Equal(t, "/home/test/flash-deploy", topo.TiFlashServers[3].DeployDir) require.Equal(t, "/home/test/flash-deploy/data", topo.TiFlashServers[3].DataDir) require.Equal(t, "/my-global-deploy/tiflash-9000", topo.TiFlashServers[2].DeployDir) require.Equal(t, "/my-global-deploy/tiflash-9000/data", topo.TiFlashServers[2].DataDir) require.Equal(t, "/my-global-deploy/tiflash-9000", topo.TiFlashServers[4].DeployDir) require.Equal(t, "/my-global-deploy/tiflash-9000/data", topo.TiFlashServers[4].DataDir) }) } func TestMergeComponentVersions(t *testing.T) { // test component version overwrite with2TempFile(t, ` component_versions: tidb: v8.0.0 tikv: v8.0.0 tidb_servers: - host: 172.16.5.139 `, ` component_versions: tikv: v8.1.0 pd: v8.0.0 tidb_servers: - host: 172.16.5.134 `, func(base, scale string) { baseTopo := Specification{} require.NoError(t, ParseTopologyYaml(base, &baseTopo)) scaleTopo := baseTopo.NewPart() require.NoError(t, ParseTopologyYaml(scale, scaleTopo)) mergedTopo := baseTopo.MergeTopo(scaleTopo) require.NoError(t, mergedTopo.Validate()) require.Equal(t, scaleTopo.(*Specification).ComponentVersions, mergedTopo.(*Specification).ComponentVersions) require.Equal(t, "v8.0.0", scaleTopo.(*Specification).ComponentVersions.TiDB) require.Equal(t, "v8.1.0", 
scaleTopo.(*Specification).ComponentVersions.TiKV) require.Equal(t, "v8.0.0", scaleTopo.(*Specification).ComponentVersions.PD) }) } func TestFixRelativePath(t *testing.T) { // base test topo := Specification{ TiKVServers: []*TiKVSpec{ { DeployDir: "my-deploy", }, }, } expandRelativePath("tidb", &topo) require.Equal(t, "/home/tidb/my-deploy", topo.TiKVServers[0].DeployDir) // test data dir & log dir topo = Specification{ TiKVServers: []*TiKVSpec{ { DeployDir: "my-deploy", DataDir: "my-data", LogDir: "my-log", }, }, } expandRelativePath("tidb", &topo) require.Equal(t, "/home/tidb/my-deploy", topo.TiKVServers[0].DeployDir) require.Equal(t, "/home/tidb/my-deploy/my-data", topo.TiKVServers[0].DataDir) require.Equal(t, "/home/tidb/my-deploy/my-log", topo.TiKVServers[0].LogDir) // test global options topo = Specification{ GlobalOptions: GlobalOptions{ DeployDir: "my-deploy", DataDir: "my-data", LogDir: "my-log", }, TiKVServers: []*TiKVSpec{ {}, }, } expandRelativePath("tidb", &topo) require.Equal(t, "my-deploy", topo.GlobalOptions.DeployDir) require.Equal(t, "my-data", topo.GlobalOptions.DataDir) require.Equal(t, "my-log", topo.GlobalOptions.LogDir) require.Equal(t, "", topo.TiKVServers[0].DeployDir) require.Equal(t, "", topo.TiKVServers[0].DataDir) require.Equal(t, "", topo.TiKVServers[0].LogDir) } tiup-1.16.3/pkg/cluster/spec/pd.go000066400000000000000000000362751505422223000167210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec

import (
	"context"
	"crypto/tls"
	"fmt"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/kvproto/pkg/pdpb"
	"github.com/pingcap/tiup/pkg/cluster/api"
	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/tidbver"
	"github.com/pingcap/tiup/pkg/utils"
)

// PDSpec represents the PD topology specification in topology.yaml
type PDSpec struct {
	Host                string `yaml:"host"`
	ManageHost          string `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
	ListenHost          string `yaml:"listen_host,omitempty"`
	AdvertiseClientAddr string `yaml:"advertise_client_addr,omitempty"`
	AdvertisePeerAddr   string `yaml:"advertise_peer_addr,omitempty"`
	SSHPort             int    `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	Imported            bool   `yaml:"imported,omitempty"`
	Patched             bool   `yaml:"patched,omitempty"`
	IgnoreExporter      bool   `yaml:"ignore_exporter,omitempty"`
	// Use Name to get the name with a default value if it's empty.
	Name            string               `yaml:"name"`
	ClientPort      int                  `yaml:"client_port" default:"2379"`
	PeerPort        int                  `yaml:"peer_port" default:"2380"`
	DeployDir       string               `yaml:"deploy_dir,omitempty"`
	DataDir         string               `yaml:"data_dir,omitempty"`
	LogDir          string               `yaml:"log_dir,omitempty"`
	Source          string               `yaml:"source,omitempty" validate:"source:editable"`
	NumaNode        string               `yaml:"numa_node,omitempty" validate:"numa_node:editable"`
	Config          map[string]any       `yaml:"config,omitempty" validate:"config:ignore"`
	ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"`
	Arch            string               `yaml:"arch,omitempty"`
	OS              string               `yaml:"os,omitempty"`
}

// Status queries current status of the instance; the leader node gets a
// "|L" suffix appended to "Up".
func (s *PDSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string {
	if timeout < time.Second {
		timeout = statusQueryTimeout
	}

	endpoint := utils.JoinHostPort(s.GetManageHost(), s.ClientPort)
	client := api.NewPDClient(ctx, []string{endpoint}, timeout, tlsCfg)

	// an unhealthy instance is Down regardless of its role
	if err := client.CheckHealth(); err != nil {
		return "Down"
	}

	// query the leader to decide whether this node should be flagged
	leader, err := client.GetLeader()
	if err != nil {
		return "ERR"
	}

	status := "Up"
	if s.Name == leader.Name {
		status += "|L"
	}
	return status
}

// Role returns the component role of the instance
func (s *PDSpec) Role() string {
	return ComponentPD
}

// SSH returns the host and SSH port of the instance; ManageHost takes
// precedence over Host when set.
func (s *PDSpec) SSH() (string, int) {
	if s.ManageHost != "" {
		return s.ManageHost, s.SSHPort
	}
	return s.Host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *PDSpec) GetMainPort() int {
	return s.ClientPort
}

// GetManageHost returns the manage host of the instance, falling back to Host.
func (s *PDSpec) GetManageHost() string {
	if s.ManageHost != "" {
		return s.ManageHost
	}
	return s.Host
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *PDSpec) IsImported() bool {
	return s.Imported
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *PDSpec) IgnoreMonitorAgent() bool {
	return s.IgnoreExporter
}

// GetAdvertiseClientURL returns AdvertiseClientURL, or builds one from the
// host and client port when it is not explicitly configured.
func (s *PDSpec) GetAdvertiseClientURL(enableTLS bool) string {
	if s.AdvertiseClientAddr != "" {
		return s.AdvertiseClientAddr
	}
	scheme := utils.Ternary(enableTLS, "https", "http").(string)
	return fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(s.Host, s.ClientPort))
}

// GetAdvertisePeerURL returns AdvertisePeerURL, or builds one from the
// host and peer port when it is not explicitly configured.
func (s *PDSpec) GetAdvertisePeerURL(enableTLS bool) string {
	if s.AdvertisePeerAddr != "" {
		return s.AdvertisePeerAddr
	}
	scheme := utils.Ternary(enableTLS, "https", "http").(string)
	return fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(s.Host, s.PeerPort))
}

// PDComponent represents PD component.
type PDComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *PDComponent) Name() string {
	return ComponentPD
}

// Role implements Component interface.
func (c *PDComponent) Role() string {
	return ComponentPD
}

// Source implements Component interface; a user-configured source overrides
// the default component name.
func (c *PDComponent) Source() string {
	if src := c.Topology.ComponentSources.PD; src != "" {
		return src
	}
	return ComponentPD
}

// CalculateVersion implements the Component interface; the per-component
// version overrides the cluster version when set.
func (c *PDComponent) CalculateVersion(clusterVersion string) string {
	if v := c.Topology.ComponentVersions.PD; v != "" {
		return v
	}
	return clusterVersion
}

// SetVersion implements Component interface.
func (c *PDComponent) SetVersion(version string) {
	c.Topology.ComponentVersions.PD = version
}
func (c *PDComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.PDServers)) for _, s := range c.Topology.PDServers { ins = append(ins, &PDInstance{ Name: s.Name, BaseInstance: BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: utils.Ternary(s.ListenHost != "", s.ListenHost, c.Topology.BaseTopo().GlobalOptions.ListenHost).(string), Port: s.ClientPort, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.ClientPort, s.PeerPort, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: s.Status, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.ClientPort, timeout, tlsCfg) }, Component: c, }, topo: c.Topology, }) } return ins } // PDInstance represent the PD instance type PDInstance struct { Name string BaseInstance topo Topology } // InitConfig implement Instance interface func (i *PDInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*PDSpec) scheme := utils.Ternary(enableTLS, "https", "http").(string) version := i.CalculateVersion(clusterVersion) initialCluster := []string{} for _, pdspec := range topo.PDServers { initialCluster = append(initialCluster, fmt.Sprintf("%s=%s", pdspec.Name, pdspec.GetAdvertisePeerURL(enableTLS))) } cfg := &scripts.PDScript{ Name: spec.Name, ClientURL: fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.ClientPort)), AdvertiseClientURL: spec.GetAdvertiseClientURL(enableTLS), PeerURL: fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.PeerPort)), AdvertisePeerURL: spec.GetAdvertisePeerURL(enableTLS), DeployDir: 
paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, InitialCluster: strings.Join(initialCluster, ","), NumaNode: spec.NumaNode, MSMode: topo.GlobalOptions.PDMode == "ms", } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_pd_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_pd.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } _, _, err := e.Execute(ctx, "chmod +x "+dst, false) if err != nil { return err } globalConfig := topo.ServerConfigs.PD // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( clusterName, AnsibleImportedConfigPath, fmt.Sprintf( "%s-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort(), ), ) importConfig, err := os.ReadFile(configPath) if err != nil { return err } globalConfig, err = mergeImported(importConfig, globalConfig) if err != nil { return err } } // set TLS configs spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths) if err != nil { return err } if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil { return err } return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), version, i.OS(), i.Arch(), i.ComponentName()+".toml", paths) } // setTLSConfig set TLS Config to support enable/disable TLS func (i *PDInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { // set TLS configs if enableTLS { if configs == nil { configs = make(map[string]any) } configs["security.cacert-path"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, TLSCACert, ) configs["security.cert-path"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["security.key-path"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // drainer tls config list tlsConfigs := []string{ "security.cacert-path", "security.cert-path", "security.key-path", } // delete TLS 
configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } // ScaleConfig deploy temporary config on scaling func (i *PDInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { // We need pd.toml here, but we don't need to check it if err := i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths); err != nil && errors.Cause(err) != ErrorCheckConfig { return err } cluster := mustBeClusterTopo(topo) spec := i.InstanceSpec.(*PDSpec) scheme := utils.Ternary(cluster.GlobalOptions.TLSEnabled, "https", "http").(string) initialCluster := []string{} for _, pdspec := range cluster.PDServers { initialCluster = append(initialCluster, fmt.Sprintf("%s=%s", pdspec.Name, pdspec.GetAdvertisePeerURL(cluster.GlobalOptions.TLSEnabled))) } cfg0 := &scripts.PDScript{ Name: spec.Name, ClientURL: fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.ClientPort)), AdvertiseClientURL: spec.GetAdvertiseClientURL(cluster.GlobalOptions.TLSEnabled), PeerURL: fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.PeerPort)), AdvertisePeerURL: spec.GetAdvertisePeerURL(cluster.GlobalOptions.TLSEnabled), DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, InitialCluster: strings.Join(initialCluster, ","), NumaNode: spec.NumaNode, MSMode: cluster.GlobalOptions.PDMode == "ms", } join := []string{} for _, pdspec := range cluster.PDServers { join = append(join, pdspec.GetAdvertiseClientURL(cluster.GlobalOptions.TLSEnabled)) } cfg := scripts.NewPDScaleScript(cfg0, strings.Join(join, ",")) fp := filepath.Join(paths.Cache, fmt.Sprintf("run_pd_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_pd.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err 
:= e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } return nil } var _ RollingUpdateInstance = &PDInstance{} // IsLeader checks if the instance is PD leader func (i *PDInstance) IsLeader(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config) (bool, error) { tidbTopo, ok := topo.(*Specification) if !ok { panic("topo should be type of tidb topology") } pdClient := api.NewPDClient(ctx, tidbTopo.GetPDListWithManageHost(), time.Second*5, tlsCfg) return i.checkLeader(pdClient) } func (i *PDInstance) checkLeader(pdClient *api.PDClient) (bool, error) { leader, err := pdClient.GetLeader() if err != nil { return false, errors.Annotatef(err, "failed to get PD leader %s", i.GetHost()) } return leader.Name == i.Name, nil } // PreRestart implements RollingUpdateInstance interface. func (i *PDInstance) PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, updcfg *UpdateConfig) error { timeoutOpt := &utils.RetryOption{ Timeout: time.Second * time.Duration(apiTimeoutSeconds), Delay: time.Second * 2, } tidbTopo, ok := topo.(*Specification) if !ok { panic("topo should be type of tidb topology") } pdClient := api.NewPDClient(ctx, tidbTopo.GetPDListWithManageHost(), time.Second*5, tlsCfg) isLeader, err := i.checkLeader(pdClient) if err != nil { return err } if len(tidbTopo.PDServers) > 1 && isLeader { members, err := pdClient.GetMembers() if err != nil { return err } var oldPriority int32 if m := slices.IndexFunc(members.Members, func(j *pdpb.Member) bool { return i.Name == j.Name }); m != -1 && members.Members[m].LeaderPriority != 0 { oldPriority = members.Members[m].LeaderPriority } if oldPriority != 0 { if err := pdClient.SetLeaderPriority(i.Name, 0); err != nil { return errors.Annotatef(err, "failed to clear PD leader priority[%d] %s", oldPriority, i.GetHost()) } } if err := pdClient.EvictPDLeader(timeoutOpt); err != nil { return errors.Annotatef(err, "failed to evict PD leader %s", i.GetHost()) } if err 
:= pdClient.SetLeaderPriority(i.Name, oldPriority); err != nil { return errors.Annotatef(err, "failed to recover PD leader priority[%d] %s", oldPriority, i.GetHost()) } } return nil } // PostRestart implements RollingUpdateInstance interface. func (i *PDInstance) PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, updcfg *UpdateConfig) error { // When restarting the next PD, if the PD has not been fully started and has become the target of // the transfer leader, this may cause the PD service to be unavailable for about 10 seconds. timeoutOpt := utils.RetryOption{ Attempts: 100, Delay: time.Second, Timeout: 120 * time.Second, } currentPDAddrs := []string{utils.JoinHostPort(i.GetManageHost(), i.Port)} pdClient := api.NewPDClient(ctx, currentPDAddrs, 5*time.Second, tlsCfg) if err := utils.Retry(pdClient.CheckHealth, timeoutOpt); err != nil { return errors.Annotatef(err, "failed to start PD peer %s", i.GetHost()) } if updcfg.TargetVersion != "" && tidbver.PDSupportReadyAPI(updcfg.TargetVersion) { if err := utils.Retry(pdClient.CheckReady, timeoutOpt); err != nil { return errors.Annotatef(err, "failed to wait PD load all regions %s", i.GetHost()) } } return nil } tiup-1.16.3/pkg/cluster/spec/profile.go000066400000000000000000000060011505422223000177360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec

import (
	"os"
	"os/user"
	"path"
	"path/filepath"

	"github.com/pingcap/errors"
	tiuplocaldata "github.com/pingcap/tiup/pkg/localdata"
	"github.com/pingcap/tiup/pkg/utils"
)

// sub directory names
const (
	TiUPPackageCacheDir      = "packages"
	TiUPClusterDir           = "clusters"
	TiUPAuditDir             = "audit"
	TLSCertKeyDir            = "tls"
	TLSCertKeyDirWithAnsible = "ssl"
	TLSCACert                = "ca.crt"
	TLSCAKey                 = "ca.pem"
	TLSClientCert            = "client.crt"
	TLSClientKey             = "client.pem"
	PFXClientCert            = "client.pfx"
)

// profileDir is the root of the TiUP profile, set by Initialize.
var profileDir string

// getHomeDir get the home directory of current user (if they have one).
// The result path might be empty.
func getHomeDir() (string, error) {
	currentUser, err := user.Current()
	if err != nil {
		return "", errors.Trace(err)
	}
	return currentUser.HomeDir, nil
}

var initialized = false

// Initialize initializes the global variables of meta package. If the
// environment variable TIUP_COMPONENT_DATA_DIR is set, it is used as root of
// the profile directory, otherwise the `$HOME/.tiup` of current user is used.
// The directory will be created before return if it does not already exist.
func Initialize(base string) error {
	tiupData := os.Getenv(tiuplocaldata.EnvNameComponentDataDir)
	tiupHome := os.Getenv(tiuplocaldata.EnvNameHome)

	// resolve the profile root: explicit data dir > TiUP home > $HOME/.tiup
	switch {
	case tiupData != "":
		profileDir = tiupData
	case tiupHome != "":
		profileDir = path.Join(tiupHome, tiuplocaldata.StorageParentDir, base)
	default:
		homeDir, err := getHomeDir()
		if err != nil {
			return errors.Trace(err)
		}
		profileDir = path.Join(homeDir, ".tiup", tiuplocaldata.StorageParentDir, base)
	}

	clusterBaseDir := filepath.Join(profileDir, TiUPClusterDir)
	tidbSpec = NewSpec(clusterBaseDir, func() Metadata {
		return &ClusterMeta{
			Topology: new(Specification),
		}
	})
	initialized = true

	// make sure the dir exist
	return utils.MkdirAll(profileDir, 0755)
}

// ProfileDir returns the full profile directory path of TiUP.
func ProfileDir() string {
	return profileDir
}

// ProfilePath joins a path under the profile dir
func ProfilePath(subpath ...string) string {
	return path.Join(append([]string{profileDir}, subpath...)...)
}

// ClusterPath returns the full path to a subpath (file or directory) of a
// cluster, it is a subdir in the profile dir of the user, with the cluster name
// as its name.
// It is not guaranteed the path already exist.
func ClusterPath(cluster string, subpath ...string) string {
	return GetSpecManager().Path(cluster, subpath...)
}

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package spec

import (
	"context"
	"crypto/tls"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/pingcap/tiup/pkg/cluster/api"
	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/utils"
)

// PumpSpec represents the Pump topology specification in topology.yaml
type PumpSpec struct {
	Host           string `yaml:"host"`
	ManageHost     string `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
	SSHPort        int    `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	Imported       bool   `yaml:"imported,omitempty"`
	Patched        bool   `yaml:"patched,omitempty"`
	IgnoreExporter bool   `yaml:"ignore_exporter,omitempty"`
	Port           int    `yaml:"port" default:"8250"`
	DeployDir      string `yaml:"deploy_dir,omitempty"`
	DataDir        string `yaml:"data_dir,omitempty"`
	LogDir         string `yaml:"log_dir,omitempty"`
	Offline        bool   `yaml:"offline,omitempty"`
	Source         string `yaml:"source,omitempty" validate:"source:editable"`
	NumaNode       string `yaml:"numa_node,omitempty" validate:"numa_node:editable"`
	Config         map[string]any `yaml:"config,omitempty" validate:"config:ignore"`
	ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"`
	Arch           string `yaml:"arch,omitempty"`
	OS             string `yaml:"os,omitempty"`
}

// Status queries current status of the instance. For an offline instance the
// binlog API is consulted to distinguish "Tombstone" from "Pending Offline".
func (s *PumpSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string {
	if timeout < time.Second {
		timeout = statusQueryTimeout
	}

	state := statusByHost(s.GetManageHost(), s.Port, "/status", timeout, tlsCfg)

	if s.Offline {
		binlogClient, err := api.NewBinlogClient(pdList, timeout, tlsCfg)
		if err != nil {
			// best effort: fall back to the plain HTTP state on API errors
			return state
		}
		id := utils.JoinHostPort(s.Host, s.Port)
		// tombstone query errors are deliberately ignored (best effort)
		if tombstone, _ := binlogClient.IsPumpTombstone(ctx, id); tombstone {
			state = "Tombstone"
		} else {
			state = "Pending Offline"
		}
	}
	return state
}

// Role returns the component role of the instance
func (s *PumpSpec) Role() string {
	return ComponentPump
}

// SSH returns the host and SSH port of the instance; ManageHost takes
// precedence over Host when set.
func (s *PumpSpec) SSH() (string, int) {
	if s.ManageHost != "" {
		return s.ManageHost, s.SSHPort
	}
	return s.Host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *PumpSpec) GetMainPort() int {
	return s.Port
}

// GetManageHost returns the manage host of the instance, falling back to Host.
func (s *PumpSpec) GetManageHost() string {
	if s.ManageHost != "" {
		return s.ManageHost
	}
	return s.Host
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *PumpSpec) IsImported() bool {
	return s.Imported
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *PumpSpec) IgnoreMonitorAgent() bool {
	return s.IgnoreExporter
}

// PumpComponent represents Pump component.
type PumpComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *PumpComponent) Name() string {
	return ComponentPump
}

// Role implements Component interface.
func (c *PumpComponent) Role() string {
	return ComponentPump
}

// Source implements Component interface; a user-configured source overrides
// the default component name.
func (c *PumpComponent) Source() string {
	if src := c.Topology.ComponentSources.Pump; src != "" {
		return src
	}
	return ComponentPump
}

// CalculateVersion implements the Component interface; the per-component
// version overrides the cluster version when set.
func (c *PumpComponent) CalculateVersion(clusterVersion string) string {
	if v := c.Topology.ComponentVersions.Pump; v != "" {
		return v
	}
	return clusterVersion
}

// SetVersion implements Component interface.
func (c *PumpComponent) SetVersion(version string) {
	c.Topology.ComponentVersions.Pump = version
}
func (c *PumpComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.PumpServers)) for _, s := range c.Topology.PumpServers { ins = append(ins, &PumpInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.Port, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: s.Status, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, c.Topology}) } return ins } // PumpInstance represent the Pump instance. type PumpInstance struct { BaseInstance topo Topology } // ScaleConfig deploy temporary config on scaling func (i *PumpInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } // InitConfig implements Instance interface. 
func (i *PumpInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*PumpSpec) nodeID := i.ID() // keep origin node id if is imported if i.IsImported() { nodeID = "" } pds := []string{} for _, pdspec := range topo.PDServers { pds = append(pds, pdspec.GetAdvertiseClientURL(enableTLS)) } cfg := &scripts.PumpScript{ NodeID: nodeID, Addr: utils.JoinHostPort(i.GetListenHost(), spec.Port), AdvertiseAddr: utils.JoinHostPort(spec.Host, spec.Port), PD: strings.Join(pds, ","), DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, NumaNode: spec.NumaNode, } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_pump_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_pump.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } _, _, err := e.Execute(ctx, "chmod +x "+dst, false) if err != nil { return err } globalConfig := topo.ServerConfigs.Pump // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( clusterName, AnsibleImportedConfigPath, fmt.Sprintf( "%s-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort(), ), ) importConfig, err := os.ReadFile(configPath) if err != nil { return err } globalConfig, err = mergeImported(importConfig, globalConfig) if err != nil { return err } } // set TLS configs spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths) if err != nil { return err } return i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths) } // setTLSConfig set TLS Config to support enable/disable TLS func (i *PumpInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, 
paths meta.DirPaths) (map[string]any, error) { // set TLS configs if enableTLS { if configs == nil { configs = make(map[string]any) } configs["security.ssl-ca"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, TLSCACert, ) configs["security.ssl-cert"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["security.ssl-key"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // drainer tls config list tlsConfigs := []string{ "security.ssl-ca", "security.ssl-cert", "security.ssl-key", } // delete TLS configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } tiup-1.16.3/pkg/cluster/spec/scheduling.go000066400000000000000000000246121505422223000204330ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec

import (
	"context"
	"crypto/tls"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/cluster/api"
	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/tidbver"
	"github.com/pingcap/tiup/pkg/utils"
)

// schedulingService is the PD microservice name used for primary lookup.
var schedulingService = "scheduling"

// SchedulingSpec represents the scheduling topology specification in topology.yaml
type SchedulingSpec struct {
	Host                string `yaml:"host"`
	ManageHost          string `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
	ListenHost          string `yaml:"listen_host,omitempty"`
	AdvertiseListenAddr string `yaml:"advertise_listen_addr,omitempty"`
	SSHPort             int    `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	IgnoreExporter      bool   `yaml:"ignore_exporter,omitempty"`
	// Use Name to get the name with a default value if it's empty.
	Name      string         `yaml:"name,omitempty"`
	Port      int            `yaml:"port" default:"3379"`
	DeployDir string         `yaml:"deploy_dir,omitempty"`
	DataDir   string         `yaml:"data_dir,omitempty"`
	LogDir    string         `yaml:"log_dir,omitempty"`
	Source    string         `yaml:"source,omitempty" validate:"source:editable"`
	NumaNode  string         `yaml:"numa_node,omitempty" validate:"numa_node:editable"`
	Config    map[string]any `yaml:"config,omitempty" validate:"config:ignore"`
	Arch      string         `yaml:"arch,omitempty"`
	OS        string         `yaml:"os,omitempty"`
}

// Status queries current status of the instance; the primary of the
// scheduling service gets a "|P" suffix appended to "Up".
func (s *SchedulingSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string {
	if timeout < time.Second {
		timeout = statusQueryTimeout
	}

	addr := utils.JoinHostPort(s.GetManageHost(), s.Port)
	tc := api.NewSchedulingClient(ctx, []string{addr}, timeout, tlsCfg)
	pc := api.NewPDClient(ctx, pdList, timeout, tlsCfg)

	// an unhealthy instance is Down regardless of its role
	if err := tc.CheckHealth(); err != nil {
		return "Down"
	}

	primary, err := pc.GetServicePrimary(schedulingService)
	if err != nil {
		return "ERR"
	}

	res := "Up"
	// the primary is identified by comparing advertise URLs
	enableTLS := tlsCfg != nil
	if s.GetAdvertiseListenURL(enableTLS) == primary {
		res += "|P"
	}
	return res
}

// Role returns the component role of the instance
func (s *SchedulingSpec) Role() string {
	return ComponentScheduling
}

// SSH returns the host and SSH port of the instance; ManageHost takes
// precedence over Host when set.
func (s *SchedulingSpec) SSH() (string, int) {
	if s.ManageHost != "" {
		return s.ManageHost, s.SSHPort
	}
	return s.Host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *SchedulingSpec) GetMainPort() int {
	return s.Port
}

// GetManageHost returns the manage host of the instance, falling back to Host.
func (s *SchedulingSpec) GetManageHost() string {
	if s.ManageHost != "" {
		return s.ManageHost
	}
	return s.Host
}

// IsImported returns if the node is imported from TiDB-Ansible;
// scheduling nodes are never imported.
func (s *SchedulingSpec) IsImported() bool {
	return false
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *SchedulingSpec) IgnoreMonitorAgent() bool {
	return s.IgnoreExporter
}

// GetAdvertiseListenURL returns AdvertiseListenAddr, or builds one from
// the host and port when it is not explicitly configured.
func (s *SchedulingSpec) GetAdvertiseListenURL(enableTLS bool) string {
	if s.AdvertiseListenAddr != "" {
		return s.AdvertiseListenAddr
	}
	scheme := utils.Ternary(enableTLS, "https", "http").(string)
	return fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(s.Host, s.Port))
}

// SchedulingComponent represents scheduling component.
type SchedulingComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *SchedulingComponent) Name() string {
	return ComponentScheduling
}

// Role implements Component interface.
func (c *SchedulingComponent) Role() string {
	return ComponentScheduling
}
func (c *SchedulingComponent) Source() string { source := c.Topology.ComponentSources.PD if source != "" { return source } return ComponentPD } // CalculateVersion implements the Component interface func (c *SchedulingComponent) CalculateVersion(clusterVersion string) string { version := c.Topology.ComponentVersions.Scheduling if version == "" { version = clusterVersion } return version } // SetVersion implements Component interface. func (c *SchedulingComponent) SetVersion(version string) { c.Topology.ComponentVersions.Scheduling = version } // Instances implements Component interface. func (c *SchedulingComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.SchedulingServers)) for _, s := range c.Topology.SchedulingServers { ins = append(ins, &SchedulingInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: utils.Ternary(s.ListenHost != "", s.ListenHost, c.Topology.BaseTopo().GlobalOptions.ListenHost).(string), Port: s.Port, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.Port, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: s.Status, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, topo: c.Topology, }) } return ins } // SchedulingInstance represent the scheduling instance type SchedulingInstance struct { BaseInstance topo Topology } // InitConfig implement Instance interface func (i *SchedulingInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*SchedulingSpec) scheme := utils.Ternary(enableTLS, "https", 
"http").(string) version := i.CalculateVersion(clusterVersion) pds := []string{} for _, pdspec := range topo.PDServers { pds = append(pds, pdspec.GetAdvertiseClientURL(enableTLS)) } cfg := &scripts.SchedulingScript{ Name: spec.Name, ListenURL: fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.Port)), AdvertiseListenURL: spec.GetAdvertiseListenURL(enableTLS), BackendEndpoints: strings.Join(pds, ","), DeployDir: paths.Deploy, DataDir: paths.Data[0], LogDir: paths.Log, NumaNode: spec.NumaNode, } if !tidbver.PDSupportMicroservicesWithName(version) { cfg.Name = "" } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_scheduling_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_scheduling.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } _, _, err := e.Execute(ctx, "chmod +x "+dst, false) if err != nil { return err } globalConfig := topo.ServerConfigs.Scheduling // set TLS configs spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths) if err != nil { return err } if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil { return err } return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), version, i.OS(), i.Arch(), i.ComponentName()+".toml", paths) } // setTLSConfig set TLS Config to support enable/disable TLS func (i *SchedulingInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { // set TLS configs if enableTLS { if configs == nil { configs = make(map[string]any) } configs["security.cacert-path"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, TLSCACert, ) configs["security.cert-path"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["security.key-path"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // drainer tls config list tlsConfigs := []string{ 
"security.cacert-path", "security.cert-path", "security.key-path", } // delete TLS configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } // IsPrimary checks if the instance is primary func (i *SchedulingInstance) IsPrimary(ctx context.Context, topo Topology, tlsCfg *tls.Config) (bool, error) { tidbTopo, ok := topo.(*Specification) if !ok { panic("topo should be type of tidb topology") } pdClient := api.NewPDClient(ctx, tidbTopo.GetPDListWithManageHost(), time.Second*5, tlsCfg) primary, err := pdClient.GetServicePrimary(schedulingService) if err != nil { return false, errors.Annotatef(err, "failed to get Scheduling primary %s", i.GetHost()) } spec := i.InstanceSpec.(*SchedulingSpec) enableTLS := false if tlsCfg != nil { enableTLS = true } return primary == spec.GetAdvertiseListenURL(enableTLS), nil } // ScaleConfig deploy temporary config on scaling func (i *SchedulingInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } var _ RollingUpdateInstance = &SchedulingInstance{} // PreRestart implements RollingUpdateInstance interface. func (i *SchedulingInstance) PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, updcfg *UpdateConfig) error { return nil } // PostRestart implements RollingUpdateInstance interface. func (i *SchedulingInstance) PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, updcfg *UpdateConfig) error { return nil } tiup-1.16.3/pkg/cluster/spec/server_config.go000066400000000000000000000212341505422223000211360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "bytes" "context" "errors" "fmt" "os" "path" "reflect" "strings" "github.com/BurntSushi/toml" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/clusterutil" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" "gopkg.in/yaml.v3" ) const ( // AnsibleImportedConfigPath is the sub path where all imported configs are stored AnsibleImportedConfigPath = "ansible-imported-configs" // TempConfigPath is the sub path where generated temporary configs are stored TempConfigPath = "config-cache" // migrateLockName is the directory name of migrating lock migrateLockName = "tiup-migrate.lck" ) // ErrorCheckConfig represent error occurred in config check stage var ErrorCheckConfig = errors.New("check config failed") // strKeyMap tries to convert `map[any]any` to `map[string]any` func strKeyMap(val any) any { m, ok := val.(map[any]any) if ok { ret := map[string]any{} for k, v := range m { kk, ok := k.(string) if !ok { return val } ret[kk] = strKeyMap(v) } return ret } rv := reflect.ValueOf(val) if rv.Kind() == reflect.Slice { var ret []any for i := 0; i < rv.Len(); i++ { ret = append(ret, strKeyMap(rv.Index(i).Interface())) } return ret } return val } func foldKey(key string, val any) (string, any) { parts := strings.SplitN(key, ".", 2) if len(parts) == 1 { return key, strKeyMap(val) } subKey, subVal := foldKey(parts[1], val) return parts[0], map[string]any{ subKey: strKeyMap(subVal), } } func 
patch(origin map[string]any, key string, val any) { origVal, found := origin[key] if !found { origin[key] = strKeyMap(val) return } origMap, lhsOk := origVal.(map[string]any) valMap, rhsOk := val.(map[string]any) if lhsOk && rhsOk { for k, v := range valMap { patch(origMap, k, v) } } else { // overwrite origin[key] = strKeyMap(val) } } // FoldMap convert single layer map to multi-layer func FoldMap(ms map[string]any) map[string]any { // we flatten map first to deal with the case like: // a.b: // c.d: xxx ms = FlattenMap(ms) result := map[string]any{} for k, v := range ms { key, val := foldKey(k, v) patch(result, key, val) } return result } // FlattenMap convert mutil-layer map to single layer func FlattenMap(ms map[string]any) map[string]any { result := map[string]any{} for k, v := range ms { var sub map[string]any if m, ok := v.(map[string]any); ok { sub = FlattenMap(m) } else if m, ok := v.(map[any]any); ok { fixM := map[string]any{} for k, v := range m { if sk, ok := k.(string); ok { fixM[sk] = v } } sub = FlattenMap(fixM) } else { result[k] = v continue } for sk, sv := range sub { result[k+"."+sk] = sv } } return result } // MergeConfig merge two or more config into one and unflat them // config1: // // a.b.a: 1 // a.b.b: 2 // // config2: // // a.b.a: 3 // a.b.c: 4 // // config3: // // b.c = 5 // // After MergeConfig(config1, config2, config3): // // a: // b: // a: 3 // b: 2 // c: 4 // b: // c: 5 func MergeConfig(orig map[string]any, overwrites ...map[string]any) map[string]any { lhs := FoldMap(orig) for _, overwrite := range overwrites { rhs := FoldMap(overwrite) for k, v := range rhs { patch(lhs, k, v) } } return lhs } // GetValueFromPath try to find the value by path recursively func GetValueFromPath(m map[string]any, p string) any { ss := strings.Split(p, ".") searchMap := make(map[any]any) m = FoldMap(m) for k, v := range m { searchMap[k] = v } return searchValue(searchMap, ss) } func searchValue(m map[any]any, ss []string) any { l := len(ss) switch l { 
case 0: return m case 1: return m[ss[0]] } key := ss[0] if pm, ok := m[key].(map[any]any); ok { return searchValue(pm, ss[1:]) } else if pm, ok := m[key].(map[string]any); ok { searchMap := make(map[any]any) for k, v := range pm { searchMap[k] = v } return searchValue(searchMap, ss[1:]) } return nil } // Merge2Toml merge the config of global. func Merge2Toml(comp string, global, overwrite map[string]any) ([]byte, error) { lhs := MergeConfig(global, overwrite) buf := bytes.NewBufferString(fmt.Sprintf(`# WARNING: This file is auto-generated. Do not edit! All your modification will be overwritten! # You can use 'tiup cluster edit-config' and 'tiup cluster reload' to update the configuration # All configuration items you want to change can be added to: # server_configs: # %s: # aa.b1.c3: value # aa.b2.c4: value `, comp)) enc := toml.NewEncoder(buf) enc.Indent = "" err := enc.Encode(lhs) if err != nil { return nil, perrs.Trace(err) } return buf.Bytes(), nil } func encodeRemoteCfg2Yaml(remote Remote) ([]byte, error) { if len(remote.RemoteRead) == 0 && len(remote.RemoteWrite) == 0 { return []byte{}, nil } buf := bytes.NewBufferString("") enc := yaml.NewEncoder(buf) err := enc.Encode(remote) if err != nil { return nil, err } return buf.Bytes(), nil } func mergeImported(importConfig []byte, specConfigs ...map[string]any) (map[string]any, error) { var configData map[string]any if err := toml.Unmarshal(importConfig, &configData); err != nil { return nil, perrs.Trace(err) } // overwrite topology specifieced configs upon the imported configs lhs := MergeConfig(configData, specConfigs...) 
return lhs, nil } func checkConfig(ctx context.Context, e ctxt.Executor, componentName, componentSource, version, nodeOS, arch, config string, paths meta.DirPaths) error { var cmd string configPath := path.Join(paths.Deploy, "conf", config) switch componentName { case ComponentPrometheus: cmd = fmt.Sprintf("%s/bin/prometheus/promtool check config %s", paths.Deploy, configPath) case ComponentAlertmanager: cmd = fmt.Sprintf("%s/bin/alertmanager/amtool check-config %s", paths.Deploy, configPath) default: repo, err := clusterutil.NewRepository(nodeOS, arch) if err != nil { return perrs.Annotate(ErrorCheckConfig, err.Error()) } if utils.Version(version).IsNightly() { version = utils.NightlyVersionAlias } entry, err := repo.ComponentBinEntry(componentSource, version) if err != nil { return perrs.Annotate(ErrorCheckConfig, err.Error()) } binPath := path.Join(paths.Deploy, "bin", entry) // Skip old versions if !hasConfigCheckFlag(ctx, e, binPath) { return nil } extra := "" if componentName == ComponentTiKV { // Pass in an empty pd address and the correct data dir extra = fmt.Sprintf(`--pd "" --data-dir "%s"`, paths.Data[0]) } cmd = fmt.Sprintf("%s --config-check --config=%s %s", binPath, configPath, extra) } _, _, err := e.Execute(ctx, cmd, false) if err != nil { return perrs.Annotate(ErrorCheckConfig, err.Error()) } return nil } func hasConfigCheckFlag(ctx context.Context, e ctxt.Executor, binPath string) bool { stdout, stderr, _ := e.Execute(ctx, fmt.Sprintf("%s --help", binPath), false) return strings.Contains(string(stdout), "config-check") || strings.Contains(string(stderr), "config-check") } // HandleImportPathMigration tries to rename old configs file directory for imported clusters to the new name func HandleImportPathMigration(clsName string) error { dirPath := ClusterPath(clsName) targetPath := path.Join(dirPath, AnsibleImportedConfigPath) _, err := os.Stat(targetPath) if os.IsNotExist(err) { zap.L().Warn("renaming config dir", zap.String("orig", dirPath), 
zap.String("new", targetPath)) if lckErr := utils.Retry(func() error { _, lckErr := os.Stat(path.Join(dirPath, migrateLockName)) if os.IsNotExist(lckErr) { return nil } return perrs.Errorf("config dir already lock by another task, %s", lckErr) }); lckErr != nil { return lckErr } if lckErr := os.Mkdir(path.Join(dirPath, migrateLockName), 0755); lckErr != nil { return perrs.Errorf("can not lock config dir, %s", lckErr) } defer func() { rmErr := os.Remove(path.Join(dirPath, migrateLockName)) if rmErr != nil { zap.L().Error("error unlocking config dir", zap.Error(rmErr)) } }() // ignore if the old config path does not exist if _, err := os.Stat(path.Join(dirPath, "config")); os.IsNotExist(err) { return nil } return os.Rename(path.Join(dirPath, "config"), targetPath) } return nil } tiup-1.16.3/pkg/cluster/spec/server_config_test.go000066400000000000000000000061651505422223000222030ustar00rootroot00000000000000package spec import ( "bytes" "testing" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestMerge(t *testing.T) { yamlData := []byte(` server_configs: tidb: performance.feedback-probability: 12.0 `) topo := new(Specification) err := yaml.Unmarshal(yamlData, topo) require.NoError(t, err) yamlData, err = yaml.Marshal(topo) require.NoError(t, err) decimal := bytes.Contains(yamlData, []byte("12")) require.True(t, decimal) get, err := Merge2Toml("tidb", topo.ServerConfigs.TiDB, nil) require.NoError(t, err) decimal = bytes.Contains(get, []byte("12.0")) require.True(t, decimal) } func TestGetValueFromPath(t *testing.T) { yamlData := []byte(` server_configs: tidb: a.b.c.d: 1 a.b: c.e: 3 a.b.c: f: 4 h.i.j.k: [1, 2, 4] e: f: true `) topo := new(Specification) err := yaml.Unmarshal(yamlData, topo) require.NoError(t, err) require.Equal(t, 1, GetValueFromPath(topo.ServerConfigs.TiDB, "a.b.c.d")) require.Equal(t, 3, GetValueFromPath(topo.ServerConfigs.TiDB, "a.b.c.e")) require.Equal(t, 4, GetValueFromPath(topo.ServerConfigs.TiDB, "a.b.c.f")) require.Equal(t, 
[]any{1, 2, 4}, GetValueFromPath(topo.ServerConfigs.TiDB, "h.i.j.k")) require.Equal(t, true, GetValueFromPath(topo.ServerConfigs.TiDB, "e.f")) } func TestFlattenMap(t *testing.T) { var ( m map[string]any r map[string]any ) m = map[string]any{ "a": 1, "b": map[string]any{ "c": 2, }, "d.e": 3, "f.g": map[string]any{ "h": 4, "i": 5, }, "j": []int{6, 7}, } r = FlattenMap(m) require.Equal(t, 1, r["a"]) require.Equal(t, 2, r["b.c"]) require.Equal(t, 3, r["d.e"]) require.Equal(t, 4, r["f.g.h"]) require.Equal(t, 5, r["f.g.i"]) require.Equal(t, []int{6, 7}, r["j"]) } func TestFoldMap(t *testing.T) { var ( m map[string]any r map[string]any ) m = map[string]any{ "a": 1, "b.c": 2, "b.d": 3, "e.f": map[string]any{ "g.h": 4, }, "i": map[string]any{ "j.k": 5, "l": 6, }, } r = FoldMap(m) require.Equal(t, map[string]any{ "a": 1, "b": map[string]any{ "c": 2, "d": 3, }, "e": map[string]any{ "f": map[string]any{ "g": map[string]any{ "h": 4, }, }, }, "i": map[string]any{ "j": map[string]any{ "k": 5, }, "l": 6, }, }, r) } func TestEncodeRemoteCfg(t *testing.T) { yamlData := []byte(`remote_write: - queue_config: batch_send_deadline: 5m capacity: 100000 max_samples_per_send: 10000 max_shards: 300 url: http://127.0.0.1:/8086/write remote_read: - url: http://127.0.0.1:/8086/read - url: http://127.0.0.1:/8087/read `) bs, err := encodeRemoteCfg2Yaml(Remote{ RemoteWrite: []map[string]any{ { "url": "http://127.0.0.1:/8086/write", "queue_config": map[string]any{ "batch_send_deadline": "5m", "capacity": 100000, "max_samples_per_send": 10000, "max_shards": 300, }, }, }, RemoteRead: []map[string]any{ { "url": "http://127.0.0.1:/8086/read", }, { "url": "http://127.0.0.1:/8087/read", }, }, }) require.NoError(t, err) require.Equal(t, yamlData, bs) } tiup-1.16.3/pkg/cluster/spec/spec.go000066400000000000000000001060301505422223000172330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "fmt" "path/filepath" "reflect" "strings" "sync" "time" "github.com/creasty/defaults" "github.com/joomcode/errorx" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" clientv3 "go.etcd.io/etcd/client/v3" "go.uber.org/zap" ) const ( // Timeout in second when querying node status statusQueryTimeout = 10 * time.Second // the prometheus metric name of start time of the process since unix epoch in seconds. 
promMetricStartTimeSeconds = "process_start_time_seconds" ) // FullHostType is the type of fullhost operations type FullHostType string const ( // FullArchType cpu-arch type FullArchType FullHostType = "Arch" // FullOSType kernel-name FullOSType FullHostType = "OS" ) // SystemdMode is the mode used by systemctl type SystemdMode string const ( // SystemMode system mode SystemMode SystemdMode = "system" // UserMode user mode UserMode SystemdMode = "user" ) // general role names var ( RoleMonitor = "monitor" RoleTiSparkMaster = "tispark-master" RoleTiSparkWorker = "tispark-worker" TopoTypeTiDB = "tidb-cluster" TopoTypeDM = "dm-cluster" ) type ( // InstanceSpec represent a instance specification InstanceSpec interface { Role() string SSH() (string, int) GetMainPort() int IsImported() bool IgnoreMonitorAgent() bool } // GlobalOptions represents the global options for all groups in topology // specification in topology.yaml GlobalOptions struct { User string `yaml:"user,omitempty" default:"tidb"` Group string `yaml:"group,omitempty"` SSHPort int `yaml:"ssh_port,omitempty" default:"22" validate:"ssh_port:editable"` SSHType executor.SSHType `yaml:"ssh_type,omitempty" default:"builtin"` TLSEnabled bool `yaml:"enable_tls,omitempty"` ListenHost string `yaml:"listen_host,omitempty" validate:"listen_host:editable"` DeployDir string `yaml:"deploy_dir,omitempty" default:"deploy"` DataDir string `yaml:"data_dir,omitempty" default:"data"` LogDir string `yaml:"log_dir,omitempty"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` OS string `yaml:"os,omitempty" default:"linux"` Arch string `yaml:"arch,omitempty"` Custom any `yaml:"custom,omitempty" validate:"custom:ignore"` SystemdMode SystemdMode `yaml:"systemd_mode,omitempty" default:"system"` PDMode string `yaml:"pd_mode,omitempty" validate:"pd_mode:editable"` } // MonitoredOptions represents the monitored node configuration MonitoredOptions struct { NodeExporterPort int 
`yaml:"node_exporter_port,omitempty" default:"9100"` BlackboxExporterPort int `yaml:"blackbox_exporter_port,omitempty" default:"9115"` NodeExporterVersion string `yaml:"node_exporter_version,omitempty"` BlackboxExporterVersion string `yaml:"blackbox_exporter_version,omitempty"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` } // ServerConfigs represents the server runtime configuration ServerConfigs struct { TiDB map[string]any `yaml:"tidb"` TiKV map[string]any `yaml:"tikv"` PD map[string]any `yaml:"pd"` TSO map[string]any `yaml:"tso"` Scheduling map[string]any `yaml:"scheduling"` Dashboard map[string]any `yaml:"tidb_dashboard"` TiFlash map[string]any `yaml:"tiflash"` TiProxy map[string]any `yaml:"tiproxy"` TiFlashLearner map[string]any `yaml:"tiflash-learner"` Pump map[string]any `yaml:"pump"` Drainer map[string]any `yaml:"drainer"` CDC map[string]any `yaml:"cdc"` TiKVCDC map[string]any `yaml:"kvcdc"` Grafana map[string]string `yaml:"grafana"` } // ComponentVersions represents the versions of components ComponentVersions struct { TiDB string `yaml:"tidb,omitempty"` TiKV string `yaml:"tikv,omitempty"` TiFlash string `yaml:"tiflash,omitempty"` PD string `yaml:"pd,omitempty"` TSO string `yaml:"tso,omitempty"` Scheduling string `yaml:"scheduling,omitempty"` Dashboard string `yaml:"tidb_dashboard,omitempty"` Pump string `yaml:"pump,omitempty"` Drainer string `yaml:"drainer,omitempty"` CDC string `yaml:"cdc,omitempty"` TiKVCDC string `yaml:"kvcdc,omitempty"` TiProxy string `yaml:"tiproxy,omitempty"` Prometheus string `yaml:"prometheus,omitempty"` Grafana string `yaml:"grafana,omitempty"` AlertManager string `yaml:"alertmanager,omitempty"` // The versions of exporters are placed within the monitored section 
because they are not explicitly treated as separate components. // NodeExporter string `yaml:"node_exporter,omitempty"` // BlackboxExporter string `yaml:"blackbox_exporter,omitempty"` } // ComponentSources represents the source of components ComponentSources struct { TiDB string `yaml:"tidb,omitempty" validate:"tidb:editable"` TiKV string `yaml:"tikv,omitempty" validate:"tikv:editable"` TiFlash string `yaml:"tiflash,omitempty" validate:"tiflash:editable"` PD string `yaml:"pd,omitempty" validate:"pd:editable"` Dashboard string `yaml:"tidb_dashboard,omitempty" validate:"tidb_dashboard:editable"` Pump string `yaml:"pump,omitempty" validate:"pump:editable"` Drainer string `yaml:"drainer,omitempty" validate:"drainer:editable"` CDC string `yaml:"cdc,omitempty" validate:"cdc:editable"` TiKVCDC string `yaml:"kvcdc,omitempty" validate:"kvcdc:editable"` } // Specification represents the specification of topology.yaml Specification struct { GlobalOptions GlobalOptions `yaml:"global,omitempty" validate:"global:editable"` MonitoredOptions MonitoredOptions `yaml:"monitored,omitempty" validate:"monitored:editable"` ComponentVersions ComponentVersions `yaml:"component_versions,omitempty" validate:"component_versions:editable"` ComponentSources ComponentSources `yaml:"component_sources,omitempty" validate:"component_sources:editable"` ServerConfigs ServerConfigs `yaml:"server_configs,omitempty" validate:"server_configs:ignore"` TiDBServers []*TiDBSpec `yaml:"tidb_servers"` TiKVServers []*TiKVSpec `yaml:"tikv_servers"` TiFlashServers []*TiFlashSpec `yaml:"tiflash_servers"` TiProxyServers []*TiProxySpec `yaml:"tiproxy_servers"` PDServers []*PDSpec `yaml:"pd_servers"` TSOServers []*TSOSpec `yaml:"tso_servers,omitempty"` SchedulingServers []*SchedulingSpec `yaml:"scheduling_servers,omitempty"` DashboardServers []*DashboardSpec `yaml:"tidb_dashboard_servers,omitempty"` PumpServers []*PumpSpec `yaml:"pump_servers,omitempty"` Drainers []*DrainerSpec `yaml:"drainer_servers,omitempty"` 
CDCServers []*CDCSpec `yaml:"cdc_servers,omitempty"` TiKVCDCServers []*TiKVCDCSpec `yaml:"kvcdc_servers,omitempty"` TiSparkMasters []*TiSparkMasterSpec `yaml:"tispark_masters,omitempty"` TiSparkWorkers []*TiSparkWorkerSpec `yaml:"tispark_workers,omitempty"` Monitors []*PrometheusSpec `yaml:"monitoring_servers"` Grafanas []*GrafanaSpec `yaml:"grafana_servers,omitempty"` Alertmanagers []*AlertmanagerSpec `yaml:"alertmanager_servers,omitempty"` } ) // BaseTopo is the base info to topology. type BaseTopo struct { GlobalOptions *GlobalOptions MonitoredOptions *MonitoredOptions MasterList []string PrometheusVersion *string GrafanaVersion *string AlertManagerVersion *string Monitors []*PrometheusSpec Grafanas []*GrafanaSpec Alertmanagers []*AlertmanagerSpec } // Topology represents specification of the cluster. type Topology interface { Type() string BaseTopo() *BaseTopo // Validate validates the topology specification and produce error if the // specification invalid (e.g: port conflicts or directory conflicts) Validate() error // Instances() []Instance ComponentsByStartOrder() []Component ComponentsByStopOrder() []Component ComponentsByUpdateOrder(curVer string) []Component IterInstance(fn func(instance Instance), concurrency ...int) GetMonitoredOptions() *MonitoredOptions // count how many time a path is used by instances in cluster CountDir(host string, dir string) int TLSConfig(dir string) (*tls.Config, error) Merge(that Topology) Topology FillHostArchOrOS(hostArchmap map[string]string, fullType FullHostType) error GetGrafanaConfig() map[string]string ScaleOutTopology } // BaseMeta is the base info of metadata. type BaseMeta struct { User string Group string Version string OpsVer *string `yaml:"last_ops_ver,omitempty"` // the version of ourself that updated the meta last time } // Metadata of a cluster. 
type Metadata interface { GetTopology() Topology SetTopology(topo Topology) GetBaseMeta() *BaseMeta UpgradableMetadata } // ScaleOutTopology represents a scale out metadata. type ScaleOutTopology interface { // Inherit existing global configuration. We must assign the inherited values before unmarshalling // because some default value rely on the global options and monitored options. // TODO: we should separate the unmarshal and setting default value. NewPart() Topology MergeTopo(topo Topology) Topology } // UpgradableMetadata represents a upgradable Metadata. type UpgradableMetadata interface { SetVersion(s string) SetUser(u string) } // NewPart implements ScaleOutTopology interface. func (s *Specification) NewPart() Topology { return &Specification{ GlobalOptions: s.GlobalOptions, MonitoredOptions: s.MonitoredOptions, ServerConfigs: s.ServerConfigs, ComponentVersions: s.ComponentVersions, } } // MergeTopo implements ScaleOutTopology interface. func (s *Specification) MergeTopo(topo Topology) Topology { other, ok := topo.(*Specification) if !ok { panic("topo should be Specification") } return s.Merge(other) } // GetMonitoredOptions implements Topology interface. func (s *Specification) GetMonitoredOptions() *MonitoredOptions { return &s.MonitoredOptions } // TLSConfig generates a tls.Config for the specification as needed func (s *Specification) TLSConfig(dir string) (*tls.Config, error) { if !s.GlobalOptions.TLSEnabled { return nil, nil } tlsConfig, err := LoadClientCert(dir) if err != nil { return nil, errorx.EnsureStackTrace(err). WithProperty(tui.SuggestionFromString("TLS is enabled, but the TLS configuration cannot be obtained")) } return tlsConfig, nil } // Type implements Topology interface. func (s *Specification) Type() string { return TopoTypeTiDB } // BaseTopo implements Topology interface. 
func (s *Specification) BaseTopo() *BaseTopo { return &BaseTopo{ GlobalOptions: &s.GlobalOptions, MonitoredOptions: s.GetMonitoredOptions(), MasterList: s.GetPDListWithManageHost(), PrometheusVersion: &s.ComponentVersions.Prometheus, GrafanaVersion: &s.ComponentVersions.Grafana, AlertManagerVersion: &s.ComponentVersions.AlertManager, Monitors: s.Monitors, Grafanas: s.Grafanas, Alertmanagers: s.Alertmanagers, } } // LocationLabels returns replication.location-labels in PD config func (s *Specification) LocationLabels() ([]string, error) { lbs := []string{} // We don't allow user define location-labels in instance config for _, pd := range s.PDServers { if GetValueFromPath(pd.Config, "replication.location-labels") != nil { return nil, errors.Errorf( "replication.location-labels can't be defined in instance %s:%d, please move it to the global server_configs field", pd.Host, pd.GetMainPort(), ) } } if repLbs := GetValueFromPath(s.ServerConfigs.PD, "replication.location-labels"); repLbs != nil { for _, l := range repLbs.([]any) { lb, ok := l.(string) if !ok { return nil, errors.Errorf("replication.location-labels contains non-string label: %v", l) } lbs = append(lbs, lb) } } return lbs, nil } // GetTiKVLabels implements TiKVLabelProvider func (s *Specification) GetTiKVLabels() (map[string]map[string]string, []map[string]api.LabelInfo, error) { kvs := s.TiKVServers locationLabels := map[string]map[string]string{} for _, kv := range kvs { address := utils.JoinHostPort(kv.Host, kv.GetMainPort()) var err error if locationLabels[address], err = kv.Labels(); err != nil { return nil, nil, err } } return locationLabels, nil, nil } // AllComponentNames contains the names of all components. 
// should include all components in ComponentsByStartOrder func AllComponentNames() (roles []string) { tp := &Specification{} tp.IterComponent(func(c Component) { switch c.Name() { case ComponentTiSpark: // tispark-{master, worker} roles = append(roles, c.Role()) default: roles = append(roles, c.Name()) } }) return } // UnmarshalYAML implements the yaml.Unmarshaler interface, // it sets the default values when unmarshaling the topology file func (s *Specification) UnmarshalYAML(unmarshal func(any) error) error { type topology Specification if err := unmarshal((*topology)(s)); err != nil { return err } // set default values from tag if err := defaults.Set(s); err != nil { return errors.Trace(err) } // Set monitored options if s.MonitoredOptions.DeployDir == "" { s.MonitoredOptions.DeployDir = filepath.Join(s.GlobalOptions.DeployDir, fmt.Sprintf("%s-%d", RoleMonitor, s.MonitoredOptions.NodeExporterPort)) } if s.MonitoredOptions.DataDir == "" { s.MonitoredOptions.DataDir = filepath.Join(s.GlobalOptions.DataDir, fmt.Sprintf("%s-%d", RoleMonitor, s.MonitoredOptions.NodeExporterPort)) } if s.MonitoredOptions.LogDir == "" { s.MonitoredOptions.LogDir = "log" } if !strings.HasPrefix(s.MonitoredOptions.LogDir, "/") && !strings.HasPrefix(s.MonitoredOptions.LogDir, s.MonitoredOptions.DeployDir) { s.MonitoredOptions.LogDir = filepath.Join(s.MonitoredOptions.DeployDir, s.MonitoredOptions.LogDir) } // populate custom default values as needed if err := fillCustomDefaults(&s.GlobalOptions, s); err != nil { return err } // Rewrite TiFlashSpec.DataDir since we may override it with configurations. // Should do it before validatation because we need to detect dir conflicts. 
for i := 0; i < len(s.TiFlashServers); i++ { dataDir, err := s.TiFlashServers[i].GetOverrideDataDir() if err != nil { return err } if s.TiFlashServers[i].DataDir != dataDir { zap.L().Info( "tiflash data dir is overwritten by its storage configuration", zap.String("host", s.TiFlashServers[i].Host), zap.String("dir", dataDir), ) s.TiFlashServers[i].DataDir = dataDir } } // --initial-commit-ts should not be recorded at run_drainer.sh #1682 s.removeCommitTS() return s.Validate() } func findField(v reflect.Value, fieldName string) (int, bool) { for i := 0; i < reflect.Indirect(v).NumField(); i++ { if reflect.Indirect(v).Type().Field(i).Name == fieldName { return i, true } } return -1, false } func findSliceField(v Topology, fieldName string) (reflect.Value, bool) { topo := reflect.ValueOf(v) if topo.Kind() == reflect.Ptr { topo = topo.Elem() } j, found := findField(topo, fieldName) if found { val := topo.Field(j) if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { return val, true } } return reflect.Value{}, false } // GetPDList returns a list of PD API hosts of the current cluster func (s *Specification) GetPDList() []string { var pdList []string for _, pd := range s.PDServers { pdList = append(pdList, utils.JoinHostPort(pd.Host, pd.ClientPort)) } return pdList } // GetPDListWithManageHost returns a list of PD API hosts of the current cluster func (s *Specification) GetPDListWithManageHost() []string { var pdList []string for _, pd := range s.PDServers { pdList = append(pdList, utils.JoinHostPort(pd.GetManageHost(), pd.ClientPort)) } return pdList } // GetCDCListWithManageHost returns a list of CDC API hosts of the current cluster func (s *Specification) GetCDCListWithManageHost() []string { var result []string for _, server := range s.CDCServers { host := server.Host if server.ManageHost != "" { host = server.ManageHost } result = append(result, utils.JoinHostPort(host, server.Port)) } return result } // AdjustByVersion modify the spec by cluster version. 
func (s *Specification) AdjustByVersion(clusterVersion string) { // CDC does not support data dir for version below v4.0.13, and also v5.0.0-rc, set it to empty. if !tidbver.TiCDCSupportConfigFile(clusterVersion) { for _, server := range s.CDCServers { server.DataDir = "" } } if tidbver.NgMonitorDeployByDefault(clusterVersion) { for _, m := range s.Monitors { if m.NgPort == 0 { m.NgPort = 12020 } } } } // GetPDDashboardAddress returns the cluster's dashboard addr func (s *Specification) GetPDDashboardAddress(ctx context.Context, tlsCfg *tls.Config, timeout time.Duration, pdList ...string) (string, error) { if timeout < time.Second { timeout = statusQueryTimeout } pc := api.NewPDClient(ctx, pdList, timeout, tlsCfg) dashboardAddr, err := pc.GetDashboardAddress() if err != nil { return "", err } if strings.HasPrefix(dashboardAddr, "http") { r := strings.NewReplacer("http://", "", "https://", "") dashboardAddr = r.Replace(dashboardAddr) } return dashboardAddr, nil } // GetEtcdClient loads EtcdClient of current cluster func (s *Specification) GetEtcdClient(tlsCfg *tls.Config) (*clientv3.Client, error) { return clientv3.New(clientv3.Config{ Endpoints: s.GetPDListWithManageHost(), TLS: tlsCfg, }) } // GetEtcdProxyClient loads EtcdClient of current cluster with TCP proxy func (s *Specification) GetEtcdProxyClient(tlsCfg *tls.Config, tcpProxy *proxy.TCPProxy) (*clientv3.Client, chan struct{}, error) { closeC := tcpProxy.Run(s.GetPDListWithManageHost()) cli, err := clientv3.New(clientv3.Config{ Endpoints: tcpProxy.GetEndpoints(), TLS: tlsCfg, }) return cli, closeC, err } // Merge returns a new Specification which sum old ones func (s *Specification) Merge(that Topology) Topology { spec := that.(*Specification) return &Specification{ GlobalOptions: s.GlobalOptions, MonitoredOptions: s.MonitoredOptions, ServerConfigs: s.ServerConfigs, ComponentVersions: s.ComponentVersions.Merge(spec.ComponentVersions), TiDBServers: append(s.TiDBServers, spec.TiDBServers...), TiKVServers: 
append(s.TiKVServers, spec.TiKVServers...), PDServers: append(s.PDServers, spec.PDServers...), DashboardServers: append(s.DashboardServers, spec.DashboardServers...), TiFlashServers: append(s.TiFlashServers, spec.TiFlashServers...), TiProxyServers: append(s.TiProxyServers, spec.TiProxyServers...), TSOServers: append(s.TSOServers, spec.TSOServers...), SchedulingServers: append(s.SchedulingServers, spec.SchedulingServers...), PumpServers: append(s.PumpServers, spec.PumpServers...), Drainers: append(s.Drainers, spec.Drainers...), CDCServers: append(s.CDCServers, spec.CDCServers...), TiKVCDCServers: append(s.TiKVCDCServers, spec.TiKVCDCServers...), TiSparkMasters: append(s.TiSparkMasters, spec.TiSparkMasters...), TiSparkWorkers: append(s.TiSparkWorkers, spec.TiSparkWorkers...), Monitors: append(s.Monitors, spec.Monitors...), Grafanas: append(s.Grafanas, spec.Grafanas...), Alertmanagers: append(s.Alertmanagers, spec.Alertmanagers...), } } // Merge returns a new ComponentVersions which sum old ones func (v *ComponentVersions) Merge(that ComponentVersions) ComponentVersions { return ComponentVersions{ TiDB: utils.Ternary(that.TiDB != "", that.TiDB, v.TiDB).(string), TiKV: utils.Ternary(that.TiKV != "", that.TiKV, v.TiKV).(string), PD: utils.Ternary(that.PD != "", that.PD, v.PD).(string), TSO: utils.Ternary(that.TSO != "", that.TSO, v.TSO).(string), Scheduling: utils.Ternary(that.Scheduling != "", that.Scheduling, v.Scheduling).(string), Dashboard: utils.Ternary(that.Dashboard != "", that.Dashboard, v.Dashboard).(string), TiFlash: utils.Ternary(that.TiFlash != "", that.TiFlash, v.TiFlash).(string), TiProxy: utils.Ternary(that.TiProxy != "", that.TiProxy, v.TiProxy).(string), Pump: utils.Ternary(that.Pump != "", that.Pump, v.Pump).(string), Drainer: utils.Ternary(that.Drainer != "", that.Drainer, v.Drainer).(string), CDC: utils.Ternary(that.CDC != "", that.CDC, v.CDC).(string), TiKVCDC: utils.Ternary(that.TiKVCDC != "", that.TiKVCDC, v.TiKVCDC).(string), Grafana: 
utils.Ternary(that.Grafana != "", that.Grafana, v.Grafana).(string),
		Prometheus:   utils.Ternary(that.Prometheus != "", that.Prometheus, v.Prometheus).(string),
		AlertManager: utils.Ternary(that.AlertManager != "", that.AlertManager, v.AlertManager).(string),
	}
}

// fillCustomDefaults fills the default values for every exported field of the
// given struct pointer, walking each field through setCustomDefaults.
func fillCustomDefaults(globalOptions *GlobalOptions, data any) error {
	v := reflect.ValueOf(data).Elem()
	t := v.Type()

	var err error
	for i := 0; i < t.NumField(); i++ {
		if err = setCustomDefaults(globalOptions, v.Field(i)); err != nil {
			return err
		}
	}

	return nil
}

// Cached type names used to recognize option/config structs that must not be
// touched when filling per-instance defaults.
var (
	globalOptionTypeName      = reflect.TypeOf(GlobalOptions{}).Name()
	monitorOptionTypeName     = reflect.TypeOf(MonitoredOptions{}).Name()
	serverConfigsTypeName     = reflect.TypeOf(ServerConfigs{}).Name()
	componentVersionsTypeName = reflect.TypeOf(ComponentVersions{}).Name()
	componentSourcesTypeName  = reflect.TypeOf(ComponentSources{}).Name()
)

// isSkipField reports whether the field is one of the global/monitored/server
// option structs that default-filling must skip.
func isSkipField(field reflect.Value) bool {
	tp := field.Type().Name()
	return tp == globalOptionTypeName || tp == monitorOptionTypeName || tp == serverConfigsTypeName || tp == componentVersionsTypeName || tp == componentSourcesTypeName
}

// setDefaultDir sets the field to "<parent>/<role>-<port>" when the field is
// currently empty and is updatable per the defaults package.
func setDefaultDir(parent, role, port string, field reflect.Value) {
	if field.String() != "" {
		return
	}
	if defaults.CanUpdate(field.Interface()) {
		dir := fmt.Sprintf("%s-%s", role, port)
		field.Set(reflect.ValueOf(filepath.Join(parent, dir)))
	}
}

// setCustomDefaults recursively fills defaults (SSH port, instance name,
// data/deploy/log dirs, arch/OS normalization) into one reflected field.
// Slices are walked element by element; structs are copied out, filled and
// copied back so that unaddressable values can still be updated.
func setCustomDefaults(globalOptions *GlobalOptions, field reflect.Value) error {
	if !field.CanSet() || isSkipField(field) {
		return nil
	}

	switch field.Kind() {
	case reflect.Slice:
		for i := 0; i < field.Len(); i++ {
			if err := setCustomDefaults(globalOptions, field.Index(i)); err != nil {
				return err
			}
		}
	case reflect.Struct:
		ref := reflect.New(field.Type())
		ref.Elem().Set(field)
		if err := fillCustomDefaults(globalOptions, ref.Interface()); err != nil {
			return err
		}
		field.Set(ref.Elem())
	case reflect.Ptr:
		if err := setCustomDefaults(globalOptions, field.Elem()); err != nil {
			return err
		}
	}

	// Only struct fields carry the named sub-fields handled below.
	if field.Kind() != reflect.Struct {
		return nil
	}

	for j := 0; j < field.NumField(); j++ {
		switch field.Type().Field(j).Name {
		case "SSHPort":
			// Inherit the global SSH port when the instance does not set one.
			if field.Field(j).Int() != 0 {
				continue
			}
			field.Field(j).Set(reflect.ValueOf(globalOptions.SSHPort))
		case "Name":
			// Only PD related components have a `Name` field.
			if field.Field(j).String() != "" {
				continue
			}
			host := reflect.Indirect(field).FieldByName("Host").String()
			// `TSO` and `Scheduling` components use a `Port` field; derive the
			// component name from the spec type name, e.g. "spec.TSOSpec" -> "tso".
			if reflect.Indirect(field).FieldByName("Port").IsValid() {
				port := reflect.Indirect(field).FieldByName("Port").Int()
				role := strings.Split(strings.Split(field.Type().String(), ".")[1], "Spec")[0]
				component := strings.ToLower(role)
				field.Field(j).Set(reflect.ValueOf(fmt.Sprintf("%s-%s-%d", component, host, port)))
				continue
			}
			// PD itself is named after its client port.
			clientPort := reflect.Indirect(field).FieldByName("ClientPort").Int()
			field.Field(j).Set(reflect.ValueOf(fmt.Sprintf("pd-%s-%d", host, clientPort)))
		case "DataDir":
			// Imported (from tidb-ansible) instances get a role-port default first.
			if imported := reflect.Indirect(field).FieldByName("Imported"); imported.IsValid() && imported.Interface().(bool) {
				setDefaultDir(globalOptions.DataDir, field.Addr().Interface().(InstanceSpec).Role(), getPort(field), field.Field(j))
			}

			dataDir := field.Field(j).String()

			// If the per-instance data_dir already have a value, skip filling default values
			// and ignore any value in global data_dir, the default values are filled only
			// when the per-instance data_dir is empty
			if dataDir != "" {
				continue
			}
			// If the data dir in global options is an absolute path, append current
			// value to the global and has a comp-port sub directory
			if strings.HasPrefix(globalOptions.DataDir, "/") {
				field.Field(j).Set(reflect.ValueOf(filepath.Join(
					globalOptions.DataDir,
					fmt.Sprintf("%s-%s", field.Addr().Interface().(InstanceSpec).Role(), getPort(field)),
				)))
				continue
			}
			// If the data dir in global options is empty or a relative path, keep it be relative
			// Our run_*.sh start scripts are run inside deploy_path, so the final location
			// will be deploy_path/global.data_dir
			// (the default value of global.data_dir is "data")
			if globalOptions.DataDir == "" {
				field.Field(j).Set(reflect.ValueOf("data"))
			} else {
				field.Field(j).Set(reflect.ValueOf(globalOptions.DataDir))
			}
		case "DeployDir":
			setDefaultDir(globalOptions.DeployDir, field.Addr().Interface().(InstanceSpec).Role(), getPort(field), field.Field(j))
		case "LogDir":
			// Imported (from tidb-ansible) instances get a role-port default first.
			if imported := reflect.Indirect(field).FieldByName("Imported"); imported.IsValid() && imported.Interface().(bool) {
				setDefaultDir(globalOptions.LogDir, field.Addr().Interface().(InstanceSpec).Role(), getPort(field), field.Field(j))
			}

			logDir := field.Field(j).String()

			// If the per-instance log_dir already have a value, skip filling default values
			// and ignore any value in global log_dir, the default values are filled only
			// when the per-instance log_dir is empty
			if logDir != "" {
				continue
			}
			// If the log dir in global options is an absolute path, append current
			// value to the global and has a comp-port sub directory
			if strings.HasPrefix(globalOptions.LogDir, "/") {
				field.Field(j).Set(reflect.ValueOf(filepath.Join(
					globalOptions.LogDir,
					fmt.Sprintf("%s-%s", field.Addr().Interface().(InstanceSpec).Role(), getPort(field)),
				)))
				continue
			}
			// If the log dir in global options is empty or a relative path, keep it be relative
			// Our run_*.sh start scripts are run inside deploy_path, so the final location
			// will be deploy_path/global.log_dir
			// (the default value of global.log_dir is "log")
			if globalOptions.LogDir == "" {
				field.Field(j).Set(reflect.ValueOf("log"))
			} else {
				field.Field(j).Set(reflect.ValueOf(globalOptions.LogDir))
			}
		case "Arch":
			switch strings.ToLower(field.Field(j).String()) {
			// replace "x86_64" with amd64, they are the same in our repo
			case "x86_64":
				field.Field(j).Set(reflect.ValueOf("amd64"))
			// replace "aarch64" with arm64
			case "aarch64":
				field.Field(j).Set(reflect.ValueOf("arm64"))
			}
			// convert to lower case
			if field.Field(j).String() != "" {
				field.Field(j).Set(reflect.ValueOf(strings.ToLower(field.Field(j).String())))
			}
		case "OS":
			// convert to lower case
			if field.Field(j).String() != "" {
				field.Field(j).Set(reflect.ValueOf(strings.ToLower(field.Field(j).String())))
			}
		}
	}

	return nil
}

// getPort returns the first port-like field of the spec struct, formatted as
// a decimal string; empty when no such field exists.
func getPort(v reflect.Value) string {
	for i := 0; i < v.NumField(); i++ {
		switch v.Type().Field(i).Name {
		case "Port", "ClientPort", "WebPort", "TCPPort", "NodeExporterPort":
			return fmt.Sprintf("%d", v.Field(i).Int())
		}
	}
	return ""
}

// ComponentsByStopOrder return component in the order need to stop.
func (s *Specification) ComponentsByStopOrder() (comps []Component) {
	comps = s.ComponentsByStartOrder()
	// stop order is the exact reverse of start order
	i := 0
	j := len(comps) - 1
	for i < j {
		comps[i], comps[j] = comps[j], comps[i]
		i++
		j--
	}
	return
}

// ComponentsByStartOrder return component in the order need to start.
func (s *Specification) ComponentsByStartOrder() (comps []Component) {
	// "pd", "tso", "scheduling", "dashboard", "tiproxy", "tikv", "pump", "tidb", "tiflash", "drainer", "cdc", "tikv-cdc", "prometheus", "grafana", "alertmanager"
	comps = append(comps, &PDComponent{s})
	comps = append(comps, &TSOComponent{s})
	comps = append(comps, &SchedulingComponent{s})
	comps = append(comps, &DashboardComponent{s})
	comps = append(comps, &TiProxyComponent{s})
	comps = append(comps, &TiKVComponent{s})
	comps = append(comps, &PumpComponent{s})
	comps = append(comps, &TiDBComponent{s})
	comps = append(comps, &TiFlashComponent{s})
	comps = append(comps, &DrainerComponent{s})
	comps = append(comps, &CDCComponent{s})
	comps = append(comps, &TiKVCDCComponent{s})
	comps = append(comps, &MonitorComponent{s})
	comps = append(comps, &GrafanaComponent{s})
	comps = append(comps, &AlertManagerComponent{s})
	comps = append(comps, &TiSparkMasterComponent{s})
	comps = append(comps, &TiSparkWorkerComponent{s})
	return
}

// ComponentsByUpdateOrder return component in the order need to be updated.
func (s *Specification) ComponentsByUpdateOrder(curVer string) (comps []Component) {
	// Ref: https://github.com/pingcap/tiup/issues/2166
	// Whether TiCDC must be upgraded before PD/TiKV/TiDB depends on the
	// current cluster version.
	cdcUpgradeBeforePDTiKVTiDB := tidbver.TiCDCUpgradeBeforePDTiKVTiDB(curVer)

	// "tiflash", <"cdc">, "pd", "tso", "scheduling", "dashboard", "tiproxy", "tikv", "pump", "tidb", "drainer", <"cdc>", "prometheus", "grafana", "alertmanager"
	comps = append(comps, &TiFlashComponent{s})
	if cdcUpgradeBeforePDTiKVTiDB {
		comps = append(comps, &CDCComponent{s})
	}
	comps = append(comps, &PDComponent{s})
	comps = append(comps, &TSOComponent{s})
	comps = append(comps, &SchedulingComponent{s})
	comps = append(comps, &DashboardComponent{s})
	comps = append(comps, &TiProxyComponent{s})
	comps = append(comps, &TiKVComponent{s})
	comps = append(comps, &PumpComponent{s})
	comps = append(comps, &TiDBComponent{s})
	comps = append(comps, &DrainerComponent{s})
	if !cdcUpgradeBeforePDTiKVTiDB {
		comps = append(comps, &CDCComponent{s})
	}
	comps = append(comps, &MonitorComponent{s})
	comps = append(comps, &GrafanaComponent{s})
	comps = append(comps, &AlertManagerComponent{s})
	comps = append(comps, &TiSparkMasterComponent{s})
	comps = append(comps, &TiSparkWorkerComponent{s})
	return
}

// FindComponent returns the Component corresponding the name
func FindComponent(topo Topology, name string) Component {
	for _, com := range topo.ComponentsByStartOrder() {
		if com.Name() == name {
			return com
		}
	}
	return nil
}

// IterComponent iterates all components in component starting order
func (s *Specification) IterComponent(fn func(comp Component)) {
	for _, comp := range s.ComponentsByStartOrder() {
		fn(comp)
	}
}

// IterInstance iterates all instances in component starting order.
// An optional concurrency > 1 runs fn on up to that many instances at once,
// bounded by a buffered-channel worker pool; the call blocks until all
// instances have been visited.
func (s *Specification) IterInstance(fn func(instance Instance), concurrency ...int) {
	maxWorkers := 1
	wg := sync.WaitGroup{}
	if len(concurrency) > 0 && concurrency[0] > 1 {
		maxWorkers = concurrency[0]
	}
	workerPool := make(chan struct{}, maxWorkers)

	for _, comp := range s.ComponentsByStartOrder() {
		for _, inst := range comp.Instances() {
			wg.Add(1)
			workerPool <- struct{}{}
			go func(inst Instance) {
				defer func() {
					<-workerPool
					wg.Done()
				}()
				fn(inst)
			}(inst)
		}
	}
	wg.Wait()
}

// IterHost iterates one instance for each host
func IterHost(topo Topology, fn func(instance Instance)) {
	hostMap := make(map[string]bool)
	for _, comp := range topo.ComponentsByStartOrder() {
		for _, inst := range comp.Instances() {
			host := inst.GetHost()
			_, ok := hostMap[host]
			if !ok {
				hostMap[host] = true
				fn(inst)
			}
		}
	}
}

// FillHostArchOrOS fills the topology with the given host->arch mapping and
// then checks for platform conflicts between instances on the same host.
func (s *Specification) FillHostArchOrOS(hostArch map[string]string, fullType FullHostType) error {
	if err := FillHostArchOrOS(s, hostArch, fullType); err != nil {
		return err
	}

	return s.platformConflictsDetect()
}

// FillHostArchOrOS fills the topology with the given host->arch (or host->OS)
// mapping. Arch aliases are normalized first (x86_64 -> amd64,
// aarch64 -> arm64, anything else lower-cased), then every slice field of the
// topology struct is walked instance by instance.
func FillHostArchOrOS(s any, hostArchOrOS map[string]string, fullType FullHostType) error {
	for host, arch := range hostArchOrOS {
		switch arch {
		case "x86_64":
			hostArchOrOS[host] = "amd64"
		case "aarch64":
			hostArchOrOS[host] = "arm64"
		default:
			hostArchOrOS[host] = strings.ToLower(arch)
		}
	}

	v := reflect.ValueOf(s).Elem()
	t := v.Type()

	for i := 0; i < t.NumField(); i++ {
		field := v.Field(i)
		if field.Kind() != reflect.Slice {
			continue
		}
		for j := 0; j < field.Len(); j++ {
			if err := setHostArchOrOS(field.Index(j), hostArchOrOS, fullType); err != nil {
				return err
			}
		}
	}
	return nil
}

// setHostArchOrOS sets the Arch (or, for FullOSType, the OS) field of one
// instance spec from the host mapping; ManageHost takes precedence over Host
// as the lookup key, and an already-set value is never overwritten.
func setHostArchOrOS(field reflect.Value, hostArchOrOS map[string]string, fullType FullHostType) error {
	if !field.CanSet() || isSkipField(field) {
		return nil
	}

	if field.Kind() == reflect.Ptr {
		return setHostArchOrOS(field.Elem(), hostArchOrOS, fullType)
	}

	if field.Kind() != reflect.Struct {
		return nil
	}

	host := field.FieldByName("Host")
	if field.FieldByName("ManageHost").String() != "" {
		host = field.FieldByName("ManageHost")
	}
	arch := field.FieldByName("Arch")
	os := field.FieldByName("OS")

	// set arch/OS only if not set before
	if fullType == FullOSType {
		if !host.IsZero() && os.CanSet() && len(os.String()) == 0 {
			os.Set(reflect.ValueOf(hostArchOrOS[host.String()]))
		}
	} else {
		if !host.IsZero() && arch.CanSet() && len(arch.String()) == 0 {
			arch.Set(reflect.ValueOf(hostArchOrOS[host.String()]))
		}
	}
	return nil
}

// removeCommitTS migrates the deprecated spec.CommitTS of drainer specs into
// spec.Config["initial-commit-ts"] when upgrading from an old tiup-cluster.
// The config key is only filled when it is absent from both the server-level
// and instance-level configs and CommitTS holds a meaningful (!= -1) value;
// CommitTS itself is always cleared afterwards.
func (s *Specification) removeCommitTS() {
	_, ok1 := s.ServerConfigs.Drainer["initial-commit-ts"]
	for _, spec := range s.Drainers {
		_, ok2 := spec.Config["initial-commit-ts"]
		if !ok1 && !ok2 && spec.CommitTS != nil && *spec.CommitTS != -1 {
			if spec.Config == nil {
				spec.Config = make(map[string]any)
			}
			spec.Config["initial-commit-ts"] = *spec.CommitTS
		}
		spec.CommitTS = nil
	}
}

// GetGrafanaConfig returns global grafana configurations
func (s *Specification) GetGrafanaConfig() map[string]string {
	return s.ServerConfigs.Grafana
}
tiup-1.16.3/pkg/cluster/spec/spec_manager.go000066400000000000000000000175421505422223000207360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package spec import ( "errors" "os" "path/filepath" "github.com/joomcode/errorx" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/version" "gopkg.in/yaml.v3" ) var ( errNS = errorx.NewNamespace("spec") // ErrCreateDirFailed is ErrCreateDirFailed ErrCreateDirFailed = errNS.NewType("create_dir_failed") // ErrSaveMetaFailed is ErrSaveMetaFailed ErrSaveMetaFailed = errNS.NewType("save_meta_failed") // ErrSaveScaleOutFileFailed is ErrSaveMetaFailed ErrSaveScaleOutFileFailed = errNS.NewType("save_scale-out_lock_failed") ) const ( // metaFileName is the file name of the meta file. metaFileName = "meta.yaml" // PatchDirName is the directory to store patch file eg. {PatchDirName}/tidb-hotfix.tar.gz PatchDirName = "patch" // BackupDirName is the directory to save backup files. BackupDirName = "backup" // ScaleOutLockName scale_out snapshot file, like file lock ScaleOutLockName = ".scale-out.yaml" ) //revive:disable // SpecManager control management of spec meta data. type SpecManager struct { base string newMeta func() Metadata } // NewSpec create a spec instance. func NewSpec(base string, newMeta func() Metadata) *SpecManager { return &SpecManager{ base: base, newMeta: newMeta, } } // NewMetadata alloc a Metadata according the type. func (s *SpecManager) NewMetadata() Metadata { return s.newMeta() } // Path returns the full path to a subpath (file or directory) of a // cluster, it is a subdir in the profile dir of the user, with the cluster name // as its name. func (s *SpecManager) Path(cluster string, subpath ...string) string { if cluster == "" { // keep the same behavior with legacy version of TiUP, we could change // it in the future if needed. cluster = "default-cluster" } return filepath.Join(append([]string{ s.base, cluster, }, subpath...)...) } // SaveMeta save the meta with specified cluster name. 
func (s *SpecManager) SaveMeta(clusterName string, meta Metadata) error { wrapError := func(err error) *errorx.Error { return ErrSaveMetaFailed.Wrap(err, "Failed to save cluster metadata") } metaFile := s.Path(clusterName, metaFileName) backupDir := s.Path(clusterName, BackupDirName) if err := s.ensureDir(clusterName); err != nil { return wrapError(err) } if err := utils.MkdirAll(backupDir, 0755); err != nil { return wrapError(err) } data, err := yaml.Marshal(meta) if err != nil { return wrapError(err) } opsVer := meta.GetBaseMeta().OpsVer if opsVer != nil { *opsVer = version.NewTiUPVersion().String() } err = utils.SaveFileWithBackup(metaFile, data, backupDir) if err != nil { return wrapError(err) } return nil } // Metadata tries to read the metadata of a cluster from file func (s *SpecManager) Metadata(clusterName string, meta any) error { fname := s.Path(clusterName, metaFileName) yamlFile, err := os.ReadFile(fname) if err != nil { return perrs.AddStack(err) } err = yaml.Unmarshal(yamlFile, meta) if err != nil { return perrs.AddStack(err) } return nil } // Exist check if the cluster exist by checking the meta file. func (s *SpecManager) Exist(clusterName string) (exist bool, err error) { fname := s.Path(clusterName, metaFileName) _, err = os.Stat(fname) if err != nil { if os.IsNotExist(err) { return false, nil } return false, perrs.AddStack(err) } return true, nil } // Remove remove the data with specified cluster name. func (s *SpecManager) Remove(clusterName string) error { return os.RemoveAll(s.Path(clusterName)) } // List return the cluster names. 
func (s *SpecManager) List() (clusterNames []string, err error) {
	fileInfos, err := os.ReadDir(s.base)
	if err != nil {
		// no base dir yet means no clusters, which is not an error
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, perrs.AddStack(err)
	}

	for _, info := range fileInfos {
		// Skip directories that do not contain a meta file.
		if utils.IsNotExist(s.Path(info.Name(), metaFileName)) {
			continue
		}
		clusterNames = append(clusterNames, info.Name())
	}

	return
}

// GetAllClusters get a metadata list of all clusters deployed by current user
func (s *SpecManager) GetAllClusters() (map[string]Metadata, error) {
	clusters := make(map[string]Metadata)
	names, err := s.List()
	if err != nil {
		return nil, err
	}
	for _, name := range names {
		metadata := s.NewMetadata()
		err = s.Metadata(name, metadata)
		// clusters with topology validation errors should also be listed
		if err != nil && !errors.Is(perrs.Cause(err), meta.ErrValidate) &&
			!errors.Is(perrs.Cause(err), ErrNoTiSparkMaster) {
			return nil, perrs.Trace(err)
		}
		clusters[name] = metadata
	}
	return clusters, nil
}

// ensureDir ensures that the cluster directory exists.
func (s *SpecManager) ensureDir(clusterName string) error {
	if err := utils.MkdirAll(s.Path(clusterName), 0755); err != nil {
		return ErrCreateDirFailed.
			Wrap(err, "Failed to create cluster metadata directory '%s'", s.Path(clusterName)).
			WithProperty(tui.SuggestionFromString("Please check file system permissions and try again."))
	}
	return nil
}

// ScaleOutLock tries to read the ScaleOutLock of a cluster from file;
// it fails when the lock file does not exist (i.e. stage1 was not run).
func (s *SpecManager) ScaleOutLock(clusterName string) (Topology, error) {
	if locked, err := s.IsScaleOutLocked(clusterName); !locked {
		return nil, ErrSaveScaleOutFileFailed.Wrap(err, "Scale-out file lock does not exist").
			WithProperty(tui.SuggestionFromString("Please make sure to run tiup-cluster scale-out --stage1 and try again."))
	}

	fname := s.Path(clusterName, ScaleOutLockName)

	// unmarshal the file lock into a topology snapshot
	topo := &Specification{}
	err := ParseTopologyYaml(fname, topo)
	if err != nil {
		return nil, err
	}

	return topo, nil
}

// ScaleOutLockedErr: Determine whether there is a lock, and report an error if it exists
func (s *SpecManager) ScaleOutLockedErr(clusterName string) error {
	if locked, err := s.IsScaleOutLocked(clusterName); locked {
		return errNS.NewType("scale-out lock").Wrap(err, "Scale-out file lock already exists").
			WithProperty(tui.SuggestionFromString("Please run 'tiup-cluster scale-out --stage2' to continue."))
	}
	return nil
}

// IsScaleOutLocked: judge the cluster scale-out file lock status
func (s *SpecManager) IsScaleOutLocked(clusterName string) (locked bool, err error) {
	fname := s.Path(clusterName, ScaleOutLockName)

	_, err = os.Stat(fname)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, perrs.AddStack(err)
	}

	return true, nil
}

// NewScaleOutLock writes the scale-out file lock (a topology snapshot) for
// the specified cluster; it fails when a lock already exists.
func (s *SpecManager) NewScaleOutLock(clusterName string, topo Topology) error {
	wrapError := func(err error) *errorx.Error {
		return ErrSaveScaleOutFileFailed.Wrap(err, "Failed to create scale-out file lock")
	}

	if locked, err := s.IsScaleOutLocked(clusterName); locked {
		return wrapError(err).
			WithProperty(tui.SuggestionFromString("The scale out file lock already exists, please run tiup-cluster scale-out --stage2 to continue."))
	}

	lockFile := s.Path(clusterName, ScaleOutLockName)

	if err := s.ensureDir(clusterName); err != nil {
		return wrapError(err)
	}

	data, err := yaml.Marshal(topo)
	if err != nil {
		return wrapError(err)
	}

	err = utils.WriteFile(lockFile, data, 0644)
	if err != nil {
		return wrapError(err)
	}

	return nil
}

// ReleaseScaleOutLock remove the scale-out file lock with specified cluster
func (s *SpecManager) ReleaseScaleOutLock(clusterName string) error {
	return os.Remove(s.Path(clusterName, ScaleOutLockName))
}
tiup-1.16.3/pkg/cluster/spec/spec_manager_test.go000066400000000000000000000102061505422223000217630ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package spec

import (
	"crypto/tls"
	"os"
	"path/filepath"
	"sort"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestMetadata is a minimal Metadata implementation used by the tests below.
type TestMetadata struct {
	BaseMeta
	Topo *TestTopology
}

func (m *TestMetadata) SetVersion(s string) {
	m.BaseMeta.Version = s
}

func (m *TestMetadata) SetUser(s string) {
	m.BaseMeta.User = s
}

func (m *TestMetadata) GetTopology() Topology {
	return m.Topo
}

func (m *TestMetadata) GetBaseMeta() *BaseMeta {
	return &m.BaseMeta
}

func (t *TestTopology) Merge(topo Topology) Topology {
	panic("not support")
}

func (t *TestTopology) FillHostArchOrOS(hostArchOrOS map[string]string, fullType FullHostType) error {
	panic("not support")
}

func (m *TestMetadata) SetTopology(topo Topology) {
	testTopo, ok := topo.(*TestTopology)
	if !ok {
		panic("wrong toplogy type")
	}

	m.Topo = testTopo
}

// TestTopology is a stub Topology; most methods either return zero values or
// panic because the spec-manager tests never exercise them.
type TestTopology struct {
	base BaseTopo
}

func (t *TestTopology) Validate() error {
	return nil
}

func (t *TestTopology) TLSConfig(dir string) (*tls.Config, error) {
	return nil, nil
}

func (t *TestTopology) NewPart() Topology {
	panic("not support")
}

func (t *TestTopology) MergeTopo(topo Topology) Topology {
	panic("not support")
}

func (t *TestTopology) Type() string {
	return TopoTypeTiDB
}

func (t *TestTopology) BaseTopo() *BaseTopo {
	return &t.base
}

func (t *TestTopology) ComponentsByStartOrder() []Component {
	return nil
}

func (t *TestTopology) ComponentsByStopOrder() []Component {
	return nil
}

func (t *TestTopology) ComponentsByUpdateOrder(curVer string) []Component {
	return nil
}

func (t *TestTopology) IterInstance(fn func(instance Instance), concurrency ...int) {
}

func (t *TestTopology) GetMonitoredOptions() *MonitoredOptions {
	return nil
}

func (t *TestTopology) GetGlobalOptions() GlobalOptions {
	return GlobalOptions{}
}

func (t *TestTopology) CountDir(host string, dir string) int {
	return 0
}

func (t *TestTopology) GetGrafanaConfig() map[string]string {
	return nil
}

// TestSpec exercises the SpecManager round trip: save, read back, list,
// existence checks, GetAllClusters and removal.
func TestSpec(t *testing.T) {
	dir, err := os.MkdirTemp("", "test-*")
	assert.Nil(t, err)

	spec := NewSpec(dir, func() Metadata {
		return new(TestMetadata)
	})

	names, err := spec.List()
	assert.Nil(t, err)
	assert.Len(t, names, 0)

	// Should ignore directory without meta file.
	err = os.Mkdir(filepath.Join(dir, "dummy"), 0755)
	assert.Nil(t, err)
	names, err = spec.List()
	assert.Nil(t, err)
	assert.Len(t, names, 0)

	exist, err := spec.Exist("dummy")
	assert.Nil(t, err)
	assert.False(t, exist)

	var meta1 = &TestMetadata{
		BaseMeta: BaseMeta{
			Version: "1.1.1",
		},
		Topo: &TestTopology{},
	}
	var meta2 = &TestMetadata{
		BaseMeta: BaseMeta{
			Version: "2.2.2",
		},
		Topo: &TestTopology{},
	}

	err = spec.SaveMeta("name1", meta1)
	assert.Nil(t, err)
	err = spec.SaveMeta("name2", meta2)
	assert.Nil(t, err)

	getMeta := new(TestMetadata)
	err = spec.Metadata("name1", getMeta)
	assert.Nil(t, err)
	assert.Equal(t, meta1, getMeta)

	err = spec.Metadata("name2", getMeta)
	assert.Nil(t, err)
	assert.Equal(t, meta2, getMeta)

	names, err = spec.List()
	assert.Nil(t, err)
	assert.Len(t, names, 2)
	sort.Strings(names)
	assert.Equal(t, "name1", names[0])
	assert.Equal(t, "name2", names[1])

	exist, err = spec.Exist("name1")
	assert.Nil(t, err)
	assert.True(t, exist)

	exist, err = spec.Exist("name2")
	assert.Nil(t, err)
	assert.True(t, exist)

	specList, err := spec.GetAllClusters()
	assert.Nil(t, err)
	assert.Equal(t, meta1, specList["name1"])
	assert.Equal(t, meta2, specList["name2"])

	// remove name1 and check again.
	err = spec.Remove("name1")
	assert.Nil(t, err)
	exist, err = spec.Exist("name1")
	assert.Nil(t, err)
	assert.False(t, exist)

	// remove a not exist cluster should be fine
	err = spec.Remove("name1")
	assert.Nil(t, err)
}
tiup-1.16.3/pkg/cluster/spec/spec_test.go000066400000000000000000000617101505422223000202770ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "bytes" "context" "strings" "testing" "github.com/BurntSushi/toml" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestDefaultDataDir(t *testing.T) { // Test with without global DataDir. topo := new(Specification) topo.TiKVServers = append(topo.TiKVServers, &TiKVSpec{Host: "1.1.1.1", Port: 22}) topo.CDCServers = append(topo.CDCServers, &CDCSpec{Host: "2.3.3.3", Port: 22}) topo.TiKVCDCServers = append(topo.TiKVCDCServers, &TiKVCDCSpec{Host: "3.3.3.3", Port: 22}) data, err := yaml.Marshal(topo) require.NoError(t, err) // Check default value. topo = new(Specification) err = yaml.Unmarshal(data, topo) require.NoError(t, err) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "data", topo.TiKVServers[0].DataDir) require.Equal(t, "data", topo.CDCServers[0].DataDir) require.Equal(t, "data", topo.TiKVCDCServers[0].DataDir) // Can keep the default value. data, err = yaml.Marshal(topo) require.NoError(t, err) topo = new(Specification) err = yaml.Unmarshal(data, topo) require.NoError(t, err) require.Equal(t, "data", topo.GlobalOptions.DataDir) require.Equal(t, "data", topo.TiKVServers[0].DataDir) require.Equal(t, "data", topo.CDCServers[0].DataDir) require.Equal(t, "data", topo.TiKVCDCServers[0].DataDir) // Test with global DataDir. 
topo = new(Specification) topo.GlobalOptions.DataDir = "/global_data" topo.TiKVServers = append(topo.TiKVServers, &TiKVSpec{Host: "1.1.1.1", Port: 22}) topo.TiKVServers = append(topo.TiKVServers, &TiKVSpec{Host: "1.1.1.2", Port: 33, DataDir: "/my_data"}) topo.CDCServers = append(topo.CDCServers, &CDCSpec{Host: "2.3.3.3", Port: 22}) topo.CDCServers = append(topo.CDCServers, &CDCSpec{Host: "2.3.3.4", Port: 22, DataDir: "/cdc_data"}) topo.TiKVCDCServers = append(topo.TiKVCDCServers, &TiKVCDCSpec{Host: "3.3.3.3", Port: 22}) topo.TiKVCDCServers = append(topo.TiKVCDCServers, &TiKVCDCSpec{Host: "3.3.3.4", Port: 22, DataDir: "/tikv-cdc_data"}) data, err = yaml.Marshal(topo) require.NoError(t, err) topo = new(Specification) err = yaml.Unmarshal(data, topo) require.NoError(t, err) require.Equal(t, "/global_data", topo.GlobalOptions.DataDir) require.Equal(t, "/global_data/tikv-22", topo.TiKVServers[0].DataDir) require.Equal(t, "/my_data", topo.TiKVServers[1].DataDir) require.Equal(t, "/global_data/cdc-22", topo.CDCServers[0].DataDir) require.Equal(t, "/cdc_data", topo.CDCServers[1].DataDir) require.Equal(t, "/global_data/tikv-cdc-22", topo.TiKVCDCServers[0].DataDir) require.Equal(t, "/tikv-cdc_data", topo.TiKVCDCServers[1].DataDir) } func TestGlobalOptions(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tidb_servers: - host: 172.16.5.138 deploy_dir: "tidb-deploy" pd_servers: - host: 172.16.5.53 data_dir: "pd-data" cdc_servers: - host: 172.16.5.233 data_dir: "cdc-data" kvcdc_servers: - host: 172.16.5.244 data_dir: "tikv-cdc-data" `), &topo) require.NoError(t, err) require.Equal(t, "test1", topo.GlobalOptions.User) require.Equal(t, 220, topo.GlobalOptions.SSHPort) require.Equal(t, 220, topo.TiDBServers[0].SSHPort) require.Equal(t, "tidb-deploy", topo.TiDBServers[0].DeployDir) require.Equal(t, 220, topo.PDServers[0].SSHPort) require.Equal(t, "test-deploy/pd-2379", 
topo.PDServers[0].DeployDir) require.Equal(t, "pd-data", topo.PDServers[0].DataDir) require.Equal(t, 220, topo.CDCServers[0].SSHPort) require.Equal(t, "test-deploy/cdc-8300", topo.CDCServers[0].DeployDir) require.Equal(t, "cdc-data", topo.CDCServers[0].DataDir) require.Equal(t, 220, topo.TiKVCDCServers[0].SSHPort) require.Equal(t, "test-deploy/tikv-cdc-8600", topo.TiKVCDCServers[0].DeployDir) require.Equal(t, "tikv-cdc-data", topo.TiKVCDCServers[0].DataDir) } func TestDataDirAbsolute(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" data_dir: "/test-data" pd_servers: - host: 172.16.5.53 data_dir: "pd-data" - host: 172.16.5.54 client_port: 12379 cdc_servers: - host: 172.16.5.233 data_dir: "cdc-data" - host: 172.16.5.234 port: 23333 kvcdc_servers: - host: 172.16.5.244 data_dir: "tikv-cdc-data" - host: 172.16.5.245 port: 33333 `), &topo) require.NoError(t, err) require.Equal(t, "pd-data", topo.PDServers[0].DataDir) require.Equal(t, "/test-data/pd-12379", topo.PDServers[1].DataDir) require.Equal(t, "cdc-data", topo.CDCServers[0].DataDir) require.Equal(t, "/test-data/cdc-23333", topo.CDCServers[1].DataDir) require.Equal(t, "tikv-cdc-data", topo.TiKVCDCServers[0].DataDir) require.Equal(t, "/test-data/tikv-cdc-33333", topo.TiKVCDCServers[1].DataDir) } func TestGlobalConfig(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" server_configs: tidb: status.address: 10 port: 1230 latch.capacity: 20480 log.file.rotate: "123445.xxx" tikv: status.address: 10 port: 1230 latch.capacity: 20480 pd: status.address: 10 port: 1230 scheduler.max_limit: 20480 kvcdc: gc-ttl: 43200 tidb_servers: - host: 172.16.5.138 port: 1234 config: latch.capacity: 3000 log.file.rotate: "44444.xxx" - host: 172.16.5.139 port: 1234 config: latch.capacity: 5000 log.file.rotate: "55555.xxx" kvcdc_servers: - host: 172.16.5.200 - host: 172.16.5.201 port: 8601 config: 
log-level: "debug" `), &topo) require.NoError(t, err) require.Equal(t, map[string]any{ "status.address": 10, "port": 1230, "latch.capacity": 20480, "log.file.rotate": "123445.xxx", }, topo.ServerConfigs.TiDB) require.Equal(t, map[string]any{ "gc-ttl": 43200, }, topo.ServerConfigs.TiKVCDC) expected := map[string]any{ "status": map[string]any{ "address": 10, }, "port": 1230, "latch": map[string]any{ "capacity": 20480, }, "log": map[string]any{ "file": map[string]any{ "rotate": "123445.xxx", }, }, } got := FoldMap(topo.ServerConfigs.TiDB) require.Equal(t, expected, got) buf := &bytes.Buffer{} err = toml.NewEncoder(buf).Encode(expected) require.NoError(t, err) require.Equal(t, `port = 1230 [latch] capacity = 20480 [log] [log.file] rotate = "123445.xxx" [status] address = 10 `, buf.String()) expected = map[string]any{ "latch": map[string]any{ "capacity": 3000, }, "log": map[string]any{ "file": map[string]any{ "rotate": "44444.xxx", }, }, } got = FoldMap(topo.TiDBServers[0].Config) require.Equal(t, expected, got) expected = map[string]any{ "latch": map[string]any{ "capacity": 5000, }, "log": map[string]any{ "file": map[string]any{ "rotate": "55555.xxx", }, }, } got = FoldMap(topo.TiDBServers[1].Config) require.Equal(t, expected, got) expected = map[string]any{} got = FoldMap(topo.TiKVCDCServers[0].Config) require.Equal(t, expected, got) expected = map[string]any{} got = FoldMap(topo.TiKVCDCServers[0].Config) require.Equal(t, expected, got) expected = map[string]any{ "log-level": "debug", } got = FoldMap(topo.TiKVCDCServers[1].Config) require.Equal(t, expected, got) } func TestGlobalConfigPatch(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` tikv_sata_config: &tikv_sata_config config.item1: 100 config.item2: 300 config.item3.item5: 500 config.item3.item6: 600 tikv_servers: - host: 172.16.5.138 config: *tikv_sata_config `), &topo) require.NoError(t, err) expected := map[string]any{ "config": map[string]any{ "item1": 100, "item2": 300, "item3": 
map[string]any{ "item5": 500, "item6": 600, }, }, } got := FoldMap(topo.TiKVServers[0].Config) require.Equal(t, expected, got) } func TestLogDir(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` tidb_servers: - host: 172.16.5.138 deploy_dir: "test-deploy" log_dir: "test-deploy/log" `), &topo) require.NoError(t, err) require.Equal(t, "test-deploy/log", topo.TiDBServers[0].LogDir) } func TestMonitorLogDir(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` monitored: deploy_dir: "test-deploy" log_dir: "test-deploy/log" `), &topo) require.NoError(t, err) require.Equal(t, "test-deploy/log", topo.MonitoredOptions.LogDir) out, err := yaml.Marshal(topo) require.NoError(t, err) err = yaml.Unmarshal(out, &topo) require.NoError(t, err) require.Equal(t, "test-deploy/log", topo.MonitoredOptions.LogDir) } func TestMerge2Toml(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` server_configs: tikv: config.item1: 100 config.item2: 300 config.item3.item5: 500 config.item3.item6: 600 kvcdc: gc-ttl: 43200 tikv_servers: - host: 172.16.5.138 config: config.item2: 500 config.item3.item5: 700 kvcdc_servers: - host: 172.16.5.238 config: log-level: "debug" `), &topo) require.NoError(t, err) expected := `# WARNING: This file is auto-generated. Do not edit! All your modification will be overwritten! # You can use 'tiup cluster edit-config' and 'tiup cluster reload' to update the configuration # All configuration items you want to change can be added to: # server_configs: # tikv: # aa.b1.c3: value # aa.b2.c4: value [config] item1 = 100 item2 = 500 [config.item3] item5 = 700 item6 = 600 ` got, err := Merge2Toml("tikv", topo.ServerConfigs.TiKV, topo.TiKVServers[0].Config) require.NoError(t, err) require.Equal(t, expected, string(got)) expected = `# WARNING: This file is auto-generated. Do not edit! All your modification will be overwritten! 
# You can use 'tiup cluster edit-config' and 'tiup cluster reload' to update the configuration # All configuration items you want to change can be added to: # server_configs: # kvcdc: # aa.b1.c3: value # aa.b2.c4: value gc-ttl = 43200 log-level = "debug" ` got, err = Merge2Toml("kvcdc", topo.ServerConfigs.TiKVCDC, topo.TiKVCDCServers[0].Config) require.NoError(t, err) require.Equal(t, expected, string(got)) } func TestMerge2Toml2(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: test4 monitored: node_exporter_port: 9100 blackbox_exporter_port: 9110 server_configs: tidb: repair-mode: true log.level: debug log.slow-query-file: tidb-slow.log log.file.filename: tidb-test.log tikv: readpool.storage.use-unified-pool: true readpool.storage.low-concurrency: 8 pd: schedule.max-merge-region-size: 20 schedule.max-merge-region-keys: 200000 schedule.split-merge-interval: 1h schedule.max-snapshot-count: 3 schedule.max-pending-peer-count: 16 schedule.max-store-down-time: 30m schedule.leader-schedule-limit: 4 schedule.region-schedule-limit: 2048 schedule.replica-schedule-limit: 64 schedule.merge-schedule-limit: 8 schedule.hot-region-schedule-limit: 4 label-property: reject-leader: - key: "zone" value: "cn1" - key: "zone" value: "cn1" tidb_servers: - host: 172.19.0.101 pd_servers: - host: 172.19.0.102 - host: 172.19.0.104 config: schedule.replica-schedule-limit: 164 schedule.merge-schedule-limit: 18 schedule.hot-region-schedule-limit: 14 - host: 172.19.0.105 tikv_servers: - host: 172.19.0.103 `), &topo) require.NoError(t, err) expected := `# WARNING: This file is auto-generated. Do not edit! All your modification will be overwritten! 
# You can use 'tiup cluster edit-config' and 'tiup cluster reload' to update the configuration # All configuration items you want to change can be added to: # server_configs: # pd: # aa.b1.c3: value # aa.b2.c4: value [label-property] [[label-property.reject-leader]] key = "zone" value = "cn1" [[label-property.reject-leader]] key = "zone" value = "cn1" [schedule] hot-region-schedule-limit = 14 leader-schedule-limit = 4 max-merge-region-keys = 200000 max-merge-region-size = 20 max-pending-peer-count = 16 max-snapshot-count = 3 max-store-down-time = "30m" merge-schedule-limit = 18 region-schedule-limit = 2048 replica-schedule-limit = 164 split-merge-interval = "1h" ` got, err := Merge2Toml("pd", topo.ServerConfigs.PD, topo.PDServers[1].Config) require.NoError(t, err) require.Equal(t, expected, string(got)) } func TestMergeImported(t *testing.T) { spec := Specification{} // values set in topology specification of the cluster err := yaml.Unmarshal([]byte(` server_configs: tikv: config.item1: 100 config.item2: 300 config.item3.item5: 500 config.item3.item6: 600 config2.item4.item7: 700 tikv_servers: - host: 172.16.5.138 config: config.item2: 500 config.item3.item5: 700 config2.itemy: 1000 `), &spec) require.NoError(t, err) // values set in imported configs, this will be overritten by values from // topology specification if present there config := []byte(` [config] item2 = 501 [config.item3] item5 = 701 item6 = 600 [config2] itemx = "valuex" itemy = 999 [config2.item4] item7 = 780 `) expected := `# WARNING: This file is auto-generated. Do not edit! All your modification will be overwritten! 
# You can use 'tiup cluster edit-config' and 'tiup cluster reload' to update the configuration # All configuration items you want to change can be added to: # server_configs: # tikv: # aa.b1.c3: value # aa.b2.c4: value [config] item1 = 100 item2 = 500 [config.item3] item5 = 700 item6 = 600 [config2] itemx = "valuex" itemy = 1000 [config2.item4] item7 = 700 ` merge1, err := mergeImported(config, spec.ServerConfigs.TiKV) require.NoError(t, err) merge2, err := Merge2Toml(ComponentTiKV, merge1, spec.TiKVServers[0].Config) require.NoError(t, err) require.Equal(t, expected, string(merge2)) } func TestTiKVLabels(t *testing.T) { spec := Specification{} err := yaml.Unmarshal([]byte(` tikv_servers: - host: 172.16.5.138 config: server.labels: dc: dc1 zone: zone1 host: host1 `), &spec) require.NoError(t, err) labels, err := spec.TiKVServers[0].Labels() require.NoError(t, err) require.Equal(t, map[string]string{ "dc": "dc1", "zone": "zone1", "host": "host1", }, labels) spec = Specification{} err = yaml.Unmarshal([]byte(` tikv_servers: - host: 172.16.5.138 config: server.labels.dc: dc1 server.labels.zone: zone1 server.labels.host: host1 `), &spec) require.NoError(t, err) /* labels, err = spec.TiKVServers[0].Labels() require.NoError(t, err) require.Equal(t, map[string]string{ "dc": "dc1", "zone": "zone1", "host": "host1", }, labels) */ } func TestLocationLabels(t *testing.T) { spec := Specification{} lbs, err := spec.LocationLabels() require.NoError(t, err) require.Equal(t, 0, len(lbs)) err = yaml.Unmarshal([]byte(` server_configs: pd: replication.location-labels: ["zone", "host"] `), &spec) require.NoError(t, err) lbs, err = spec.LocationLabels() require.NoError(t, err) require.Equal(t, []string{"zone", "host"}, lbs) spec = Specification{} err = yaml.Unmarshal([]byte(` server_configs: pd: replication: location-labels: - zone - host `), &spec) require.NoError(t, err) lbs, err = spec.LocationLabels() require.NoError(t, err) require.Equal(t, []string{"zone", "host"}, lbs) spec = 
Specification{} err = yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.140 config: replication: location-labels: - zone - host `), &spec) require.NoError(t, err) _, err = spec.LocationLabels() require.Error(t, err) } func TestTiFlashRequiredCPUFlags(t *testing.T) { obtained := getTiFlashRequiredCPUFlagsWithVersion("v6.3.0", "AMD64") require.Equal(t, TiFlashRequiredCPUFlags, obtained) obtained = getTiFlashRequiredCPUFlagsWithVersion("v6.3.0", "X86_64") require.Equal(t, TiFlashRequiredCPUFlags, obtained) obtained = getTiFlashRequiredCPUFlagsWithVersion("nightly", "amd64") require.Equal(t, TiFlashRequiredCPUFlags, obtained) obtained = getTiFlashRequiredCPUFlagsWithVersion("v6.3.0", "aarch64") require.Equal(t, "", obtained) obtained = getTiFlashRequiredCPUFlagsWithVersion("v6.2.0", "amd64") require.Equal(t, "", obtained) } func TestTiFlashStorageSection(t *testing.T) { ctx := context.Background() spec := &Specification{} err := yaml.Unmarshal([]byte(` tiflash_servers: - host: 172.16.5.138 data_dir: /hdd0/tiflash,/hdd1/tiflash config: storage.main.dir: [/ssd0/tiflash, /ssd1/tiflash] storage.latest.dir: [/ssd0/tiflash] `), spec) require.NoError(t, err) flashComp := FindComponent(spec, ComponentTiFlash) instances := flashComp.Instances() require.Equal(t, 1, len(instances)) // parse using clusterVersion<"v4.0.9" { ins := instances[0] dataDirs := MultiDirAbs("", spec.TiFlashServers[0].DataDir) conf, err := ins.(*TiFlashInstance).initTiFlashConfig(ctx, "v4.0.8", spec.ServerConfigs.TiFlash, meta.DirPaths{Deploy: spec.TiFlashServers[0].DeployDir, Data: dataDirs, Log: spec.TiFlashServers[0].LogDir}) require.NoError(t, err) path, ok := conf["path"] require.True(t, ok) require.Equal(t, "/ssd0/tiflash,/ssd1/tiflash", path) } // parse using clusterVersion>="v4.0.9" checkWithVersion := func(ver string) { ins := instances[0].(*TiFlashInstance) dataDirs := MultiDirAbs("", spec.TiFlashServers[0].DataDir) conf, err := ins.initTiFlashConfig(ctx, ver, spec.ServerConfigs.TiFlash, 
meta.DirPaths{Deploy: spec.TiFlashServers[0].DeployDir, Data: dataDirs, Log: spec.TiFlashServers[0].LogDir}) require.NoError(t, err) _, ok := conf["path"] require.True(t, ok) // After merging instance configurations with "storgae", the "path" property should be removed. conf, err = ins.mergeTiFlashInstanceConfig(ver, conf, ins.InstanceSpec.(*TiFlashSpec).Config) require.NoError(t, err) _, ok = conf["path"] require.False(t, ok) if storageSection, ok := conf["storage"]; ok { if mainSection, ok := storageSection.(map[string]any)["main"]; ok { if mainDirsSection, ok := mainSection.(map[string]any)["dir"]; ok { var mainDirs []any = mainDirsSection.([]any) require.Equal(t, 2, len(mainDirs)) require.Equal(t, "/ssd0/tiflash", mainDirs[0].(string)) require.Equal(t, "/ssd1/tiflash", mainDirs[1].(string)) } else { t.Error("Can not get storage.main.dir section") } } else { t.Error("Can not get storage.main section") } if latestSection, ok := storageSection.(map[string]any)["latest"]; ok { if latestDirsSection, ok := latestSection.(map[string]any)["dir"]; ok { var latestDirs []any = latestDirsSection.([]any) require.Equal(t, 1, len(latestDirs)) require.Equal(t, "/ssd0/tiflash", latestDirs[0].(string)) } else { t.Error("Can not get storage.main.dir section") } } else { t.Error("Can not get storage.main section") } } else { t.Error("Can not get storage section") } } checkWithVersion("v4.0.9") checkWithVersion("nightly") } func TestTiFlashInvalidStorageSection(t *testing.T) { spec := &Specification{} testCases := [][]byte{ []byte(` tiflash_servers: - host: 172.16.5.138 data_dir: /hdd0/tiflash,/hdd1/tiflash config: # storage.main.dir is not defined storage.latest.dir: ["/ssd0/tiflash"] `), []byte(` tiflash_servers: - host: 172.16.5.138 data_dir: /hdd0/tiflash,/hdd1/tiflash config: # storage.main.dir is empty string array storage.main.dir: [] storage.latest.dir: ["/ssd0/tiflash"] `), []byte(` tiflash_servers: - host: 172.16.5.138 data_dir: /hdd0/tiflash,/hdd1/tiflash config: # 
storage.main.dir is not a string array storage.main.dir: /hdd0/tiflash,/hdd1/tiflash storage.latest.dir: ["/ssd0/tiflash"] `), []byte(` tiflash_servers: - host: 172.16.5.138 data_dir: /hdd0/tiflash,/hdd1/tiflash config: # storage.main.dir is not a string array storage.main.dir: [0, 1] storage.latest.dir: ["/ssd0/tiflash"] `), } for _, testCase := range testCases { err := yaml.Unmarshal(testCase, spec) require.Error(t, err) } } func TestTiCDCDataDir(t *testing.T) { spec := &Specification{} err := yaml.Unmarshal([]byte(` cdc_servers: - host: 172.16.6.191 data_dir: /tidb-data/cdc-8300 `), spec) require.NoError(t, err) cdcComp := FindComponent(spec, ComponentCDC) instances := cdcComp.Instances() require.Equal(t, 1, len(instances)) expected := map[string]struct { configSupported bool dataDir bool // data-dir is set dataDirSupported bool }{ "v4.0.12": {false, false, false}, "v4.0.13": {true, true, false}, "v4.0.14": {true, true, true}, "v5.0.0": {true, true, false}, "v5.0.1": {true, true, false}, "v5.0.2": {true, true, false}, "v5.0.3": {true, true, true}, "v5.1.0": {true, true, true}, "v5.0.0-rc": {false, false, false}, "v6.0.0-alpha": {true, true, true}, "v6.1.0": {true, true, true}, "v99.0.0": {true, true, true}, } checkByVersion := func(version string) { ins := instances[0].(*CDCInstance) cfg := &scripts.CDCScript{ DataDirEnabled: tidbver.TiCDCSupportDataDir(version), ConfigFileEnabled: tidbver.TiCDCSupportConfigFile(version), TLSEnabled: false, DataDir: utils.Ternary(tidbver.TiCDCSupportSortOrDataDir(version), ins.DataDir(), "").(string), } wanted := expected[version] require.Equal(t, wanted.configSupported, cfg.ConfigFileEnabled, version) require.Equal(t, wanted.dataDirSupported, cfg.DataDirEnabled, version) require.Equal(t, wanted.dataDir, len(cfg.DataDir) != 0, version) } for k := range expected { checkByVersion(k) } } func TestTiFlashUsersSettings(t *testing.T) { spec := &Specification{} err := yaml.Unmarshal([]byte(` tiflash_servers: - host: 172.16.5.138 
data_dir: /ssd0/tiflash `), spec) require.NoError(t, err) ctx := context.Background() flashComp := FindComponent(spec, ComponentTiFlash) instances := flashComp.Instances() require.Equal(t, 1, len(instances)) // parse using clusterVersion<"v4.0.12" || == "5.0.0-rc" checkBackwardCompatibility := func(ver string) { ins := instances[0].(*TiFlashInstance) dataDirs := MultiDirAbs("", spec.TiFlashServers[0].DataDir) conf, err := ins.initTiFlashConfig(ctx, ver, spec.ServerConfigs.TiFlash, meta.DirPaths{Deploy: spec.TiFlashServers[0].DeployDir, Data: dataDirs, Log: spec.TiFlashServers[0].LogDir}) require.NoError(t, err) // We need an empty string for 'users.default.password' for backward compatibility. Or the TiFlash process will fail to start with older versions if usersSection, ok := conf["users"]; !ok { t.Error("Can not get users section") } else { if defaultUser, ok := usersSection.(map[string]any)["default"]; !ok { t.Error("Can not get default users section") } else { password := defaultUser.(map[string]any)["password"] require.Equal(t, "", password.(string)) } } } checkBackwardCompatibility("v4.0.11") checkBackwardCompatibility("v5.0.0-rc") // parse using clusterVersion>="v4.0.12" checkWithVersion := func(ver string) { ins := instances[0].(*TiFlashInstance) dataDirs := MultiDirAbs("", spec.TiFlashServers[0].DataDir) conf, err := ins.initTiFlashConfig(ctx, ver, spec.ServerConfigs.TiFlash, meta.DirPaths{Deploy: spec.TiFlashServers[0].DeployDir, Data: dataDirs, Log: spec.TiFlashServers[0].LogDir}) require.NoError(t, err) // Those deprecated settings are ignored in newer versions _, ok := conf["users"] require.False(t, ok) } checkWithVersion("v4.0.12") checkWithVersion("v5.0.0") checkWithVersion("nightly") } func TestYAMLAnchor(t *testing.T) { topo := Specification{} decoder := yaml.NewDecoder(bytes.NewReader([]byte(` global: custom: tidb_spec: &tidb_spec deploy_dir: "test-deploy" log_dir: "test-deploy/log" tidb_servers: - <<: *tidb_spec host: 172.16.5.138 deploy_dir: 
"fake-deploy" `))) decoder.KnownFields(true) err := decoder.Decode(&topo) require.NoError(t, err) require.Equal(t, "172.16.5.138", topo.TiDBServers[0].Host) require.Equal(t, "fake-deploy", topo.TiDBServers[0].DeployDir) require.Equal(t, "test-deploy/log", topo.TiDBServers[0].LogDir) } func TestYAMLAnchorWithUndeclared(t *testing.T) { topo := Specification{} decoder := yaml.NewDecoder(bytes.NewReader([]byte(` global: custom: tidb_spec: &tidb_spec deploy_dir: "test-deploy" log_dir: "test-deploy/log" undeclared: "some stuff" tidb_servers: - <<: *tidb_spec host: 172.16.5.138 `))) decoder.KnownFields(true) err := decoder.Decode(&topo) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "not found")) } tiup-1.16.3/pkg/cluster/spec/testdata/000077500000000000000000000000001505422223000175635ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/spec/testdata/countdir.yaml000066400000000000000000000051611505422223000223010ustar00rootroot00000000000000user: tidb tidb_version: v4.0.2 last_ops_ver: |- v1.3.0 tiup Go Version: go1.13 Git Branch: release-1.3 GitHash: edb12b8 topology: global: user: tidb ssh_port: 22 ssh_type: builtin deploy_dir: deploy data_dir: data os: linux arch: amd64 monitored: node_exporter_port: 21580 blackbox_exporter_port: 21581 deploy_dir: /home/tidb/deploy/monitor-21580 data_dir: /home/tidb/deploy/monitor-21580/data/monitor-21580 log_dir: /home/tidb/deploy/monitor-21580/deploy/monitor-21580/log tidb_servers: - host: 172.17.0.4 ssh_port: 22 imported: true port: 21500 status_port: 21501 deploy_dir: /foo/bar/sometidbpath123/ log_dir: /foo/bar/sometidbpath123//log arch: amd64 os: linux tikv_servers: - host: 172.16.81.162 ssh_port: 22 imported: true port: 21520 status_port: 21530 deploy_dir: /work/foobar456 data_dir: /work/foobar456/data log_dir: /work/foobar456/log arch: amd64 os: linux - host: 172.16.81.205 ssh_port: 22 imported: true port: 21520 status_port: 21530 deploy_dir: /work/foobar456 data_dir: /work/foobar456/data log_dir: 
/work/foobar456/log arch: amd64 os: linux - host: 172.16.100.199 ssh_port: 22 imported: true port: 21520 status_port: 21530 deploy_dir: /work/foobar456 data_dir: /work/foobar456/data log_dir: /work/foobar456/log arch: amd64 os: linux tiflash_servers: [] pd_servers: - host: 172.18.222.51 ssh_port: 22 name: pd-172.18.222.51-21550 client_port: 21550 peer_port: 21551 deploy_dir: /foo/bar/sometidbpath123/deploy/pd-21550 data_dir: /foo/bar/sometidbpath123/deploy/pd-21550/data log_dir: /foo/bar/sometidbpath123/log/pd-21550 arch: amd64 os: linux - host: 172.18.4.42 ssh_port: 22 name: pd-172.18.4.42-21550 client_port: 21550 peer_port: 21551 deploy_dir: /foo/bar/sometidbpath123/deploy/pd-21550 data_dir: /foo/bar/sometidbpath123/deploy/pd-21550/data log_dir: /foo/bar/sometidbpath123/log/pd-21550 arch: amd64 os: linux - host: 172.18.10.16 ssh_port: 22 name: pd-172.18.10.16-21550 client_port: 21550 peer_port: 21551 deploy_dir: /foo/bar/sometidbpath123/deploy/pd-21550 data_dir: /foo/bar/sometidbpath123/deploy/pd-21550/data log_dir: /foo/bar/sometidbpath123/log/pd-21550 arch: amd64 os: linux monitoring_servers: [] grafana_servers: - host: 172.17.0.4 ssh_port: 22 imported: true port: 21570 deploy_dir: /foo/bar/sometidbpath123/ arch: amd64 os: linux username: admin password: admin tiup-1.16.3/pkg/cluster/spec/testdata/dashboards/000077500000000000000000000000001505422223000216755ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/spec/testdata/dashboards/tidb.json000066400000000000000000000000511505422223000235060ustar00rootroot00000000000000{ "desc": "this is a dummy test file" }tiup-1.16.3/pkg/cluster/spec/testdata/rules/000077500000000000000000000000001505422223000207155ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/spec/testdata/rules/tidb.rules.yml000066400000000000000000000010571505422223000235160ustar00rootroot00000000000000# magic-string-for-test groups: - name: alert.rules rules: - alert: TiDB_schema_error expr: 
increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 for: 1m labels: env: ENV_LABELS_ENV level: emergency expr: increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 annotations: description: "cluster: ENV_LABELS_ENV, instance: {{ $labels.instance }}, values:{{ $value }}" value: "{{ $value }}" summary: TiDB schema error tiup-1.16.3/pkg/cluster/spec/testdata/topology_err.yaml000066400000000000000000000112711505422223000231750ustar00rootroot00000000000000# # Global variables are applied to all deployments and as the default value of # # them if the specific deployment value missing. global: user: "tidb" ssh_port: 22 deploy_dir: "/tidb/deploy" data_dir: "/tidb/data" # # Monitored variables are used to all the machine monitored: node_exporter_port: 9100 blackbox_exporter_port: 9115 # deploy_dir: "/tidb/deploy/monitored-9100" # data_dir: "/tidb/data/monitored-9100" # log_dir: "/tidb/deploy/monitored-9100/log" server_configs: tidb: log.slow-threshold: 300 binlog.enable: false binlog.ignore-error: false tikv: # server.grpc-concurrency: 4 # raftstore.apply-pool-size: 2 # raftstore.store-pool-size: 2 # rocksdb.max-sub-compactions: 1 # storage.block-cache.capacity: "16GB" # readpool.unified.max-thread-count: 12 readpool.storage.use-unified-pool: true readpool.coprocessor.use-unified-pool: true pd: schedule.leader-schedule-limit: 4 schedule.region-schedule-limit: 2048 schedule.replica-schedule-limit: 64 # tiflash: # logger.level: "info" # tiflash-learner: # log-level: "info" # pump: # gc: 7 pd_servers: - host: 10.160.22.100 # ssh_port: 22 # name: "pd-1" # client_port: 2379 # peer_port: 2380 # deploy_dir: "/tidb/deploy/pd-2379" # data_dir: "/tidb/data/pd-2379" # log_dir: "/tidb/deploy/pd-2379/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.pd` values # config: # schedule.max-merge-region-size: 20 # schedule.max-merge-region-keys: 200000 - host: 10.160.22.101 - host: 10.160.22.102 tidb_servers: - host: 
10.160.22.107 # ssh_port: 22 # port: 4000 # status_port: 10080 # deploy_dir: "/tidb/deploy/tidb-4000" # log_dir: "/tidb/deploy/tidb-4000/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.tidb` values # config: # log.slow-query-file: tidb-slow-overwrited.log - host: 10.160.22.108 # - host: 10.0.1.9 tikv_servers: - host: 10.160.22.103 # ssh_port: 22 # port: 20160 # status_port: 20180 # deploy_dir: "/tidb/deploy/tikv-20160" # data_dir: "/tidb/data/tikv-20160" # log_dir: "/tidb/deploy/tikv-20160/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.tikv` values # config: # server.grpc-concurrency: 4 # server.labels: { zone: "zone1", dc: "dc1", host: "host1" } - host: 10.160.22.104 - host: 10.160.22.105 # tiflash_servers: # - host: 10.0.1.10 # ssh_port: 22 # tcp_port: 9000 # flash_service_port: 3930 # flash_proxy_port: 20170 # flash_proxy_status_port: 20292 # metrics_port: 8234 # deploy_dir: /tidb/deploy/tiflash-9000 # data_dir: /tidb/data/tiflash-9000 # log_dir: /tidb/deploy/tiflash-9000/log # numa_node: "0,1" # # Config is used to overwrite the `server_configs.tiflash` values # config: # logger.level: "info" # # The following config is used to overwrite the `server_configs.tiflash-learner` values # learner_config: # log-level: "info" # - host: 10.0.1.15 # - host: 10.0.1.16 # pump_servers: # - host: 10.0.1.17 # ssh_port: 22 # port: 8250 # deploy_dir: "/tidb/deploy/pump-8249" # data_dir: "/tidb/data/pump-8249" # log_dir: "/tidb/deploy/pump-8249/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.drainer` values # config: # gc: 7 # - host: 10.0.1.18 # - host: 10.0.1.19 # drainer_servers: # - host: 10.0.1.17 # port: 8249 # data_dir: "/tidb/data/drainer-8249" # # if drainer doesn't have checkpoint, use initial commitTS to initial checkpoint # # will get a latest timestamp from pd if setting to be -1 (default -1) # commit_ts: -1 # deploy_dir: "/tidb/deploy/drainer-8249" # log_dir: 
"/tidb/deploy/drainer-8249/log" # numa_node: "0,1" # # Config is used to overwrite the `server_configs.drainer` values # config: # syncer.db-type: "mysql" # syncer.to.host: "127.0.0.1" # syncer.to.user: "root" # syncer.to.password: "" # syncer.to.port: 3306 # - host: 10.0.1.19 monitoring_servers: - host: 10.160.22.106 # ssh_port: 22 # port: 9090 # deploy_dir: "/tidb/deploy/prometheus-8249" # data_dir: "/tidb/data/prometheus-8249" # log_dir: "/tidb/deploy/prometheus-8249/log" grafana_servers: - host: 10.160.22.106 # port: 3000 # deploy_dir: /tidb/deploy/grafana-3000 alertmanager_servers: - host: 10.160.22.106 # ssh_port: 22 # web_port: 9093 # cluster_port: 9094 # deploy_dir: "/tidb/deploy/alertmanager-9093" # data_dir: "/tidb/data/alertmanager-9093" # log_dir: "/tidb/deploy/alertmanager-9093/log" tiup-1.16.3/pkg/cluster/spec/tidb.go000066400000000000000000000232541505422223000172310ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec import ( "context" "crypto/tls" "fmt" "os" "path/filepath" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) // TiDBSpec represents the TiDB topology specification in topology.yaml type TiDBSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` ListenHost string `yaml:"listen_host,omitempty"` AdvertiseAddr string `yaml:"advertise_address,omitempty"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` Port int `yaml:"port" default:"4000"` StatusPort int `yaml:"status_port" default:"10080"` DeployDir string `yaml:"deploy_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` NumaCores string `yaml:"numa_cores,omitempty" validate:"numa_cores:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` } // Role returns the component role of the instance func (s *TiDBSpec) Role() string { return ComponentTiDB } // SSH returns the host and SSH port of the instance func (s *TiDBSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *TiDBSpec) GetMainPort() int { return s.Port } // GetManageHost returns the manage host of the instance func (s *TiDBSpec) GetManageHost() string { if s.ManageHost != "" { return 
s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *TiDBSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *TiDBSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // TiDBComponent represents TiDB component. type TiDBComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiDBComponent) Name() string { return ComponentTiDB } // Role implements Component interface. func (c *TiDBComponent) Role() string { return ComponentTiDB } // Source implements Component interface. func (c *TiDBComponent) Source() string { source := c.Topology.ComponentSources.TiDB if source != "" { return source } return ComponentTiDB } // CalculateVersion implements the Component interface func (c *TiDBComponent) CalculateVersion(clusterVersion string) string { version := c.Topology.ComponentVersions.TiDB if version == "" { version = clusterVersion } return version } // SetVersion implements Component interface. func (c *TiDBComponent) SetVersion(version string) { c.Topology.ComponentVersions.TiDB = version } // Instances implements Component interface. 
func (c *TiDBComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.TiDBServers)) for _, s := range c.Topology.TiDBServers { ins = append(ins, &TiDBInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: utils.Ternary(s.ListenHost != "", s.ListenHost, c.Topology.BaseTopo().GlobalOptions.ListenHost).(string), Port: s.Port, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: s.NumaCores, Ports: []int{ s.Port, s.StatusPort, }, Dirs: []string{ s.DeployDir, }, StatusFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string { return statusByHost(s.GetManageHost(), s.StatusPort, "/status", timeout, tlsCfg) }, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.StatusPort, timeout, tlsCfg) }, Component: c, }, c.Topology}) } return ins } // TiDBInstance represent the TiDB instance type TiDBInstance struct { BaseInstance topo Topology } // InitConfig implement Instance interface func (i *TiDBInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*TiDBSpec) version := i.CalculateVersion(clusterVersion) pds := []string{} for _, pdspec := range topo.PDServers { pds = append(pds, utils.JoinHostPort(pdspec.Host, pdspec.ClientPort)) } cfg := &scripts.TiDBScript{ Port: spec.Port, StatusPort: spec.StatusPort, ListenHost: i.GetListenHost(), AdvertiseAddr: utils.Ternary(spec.AdvertiseAddr != "", spec.AdvertiseAddr, spec.Host).(string), PD: strings.Join(pds, ","), SupportSecboot: tidbver.TiDBSupportSecureBoot(version), DeployDir: paths.Deploy, LogDir: paths.Log, NumaNode: spec.NumaNode, 
NumaCores: spec.NumaCores, } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tidb_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_tidb.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } _, _, err := e.Execute(ctx, "chmod +x "+dst, false) if err != nil { return err } globalConfig := topo.ServerConfigs.TiDB // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( clusterName, AnsibleImportedConfigPath, fmt.Sprintf( "%s-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort(), ), ) importConfig, err := os.ReadFile(configPath) if err != nil { return err } globalConfig, err = mergeImported(importConfig, globalConfig) if err != nil { return err } } spec.Config, err = i.setTiProxyConfig(ctx, topo, version, spec.Config, paths) if err != nil { return err } // set TLS configs spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths) if err != nil { return err } if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil { return err } return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), version, i.OS(), i.Arch(), i.ComponentName()+".toml", paths) } // setTiProxyConfig sets tiproxy session certs func (i *TiDBInstance) setTiProxyConfig(ctx context.Context, topo *Specification, version string, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { if len(topo.TiProxyServers) == 0 || !tidbver.TiDBSupportTiproxy(version) { return configs, nil } if configs == nil { configs = make(map[string]any) } // Overwrite users' configs just like TLS configs. 
configs["security.session-token-signing-cert"] = fmt.Sprintf( "%s/tls/tiproxy-session.crt", paths.Deploy) configs["security.session-token-signing-key"] = fmt.Sprintf( "%s/tls/tiproxy-session.key", paths.Deploy) return configs, nil } // setTLSConfig set TLS Config to support enable/disable TLS func (i *TiDBInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { // set TLS configs if enableTLS { if configs == nil { configs = make(map[string]any) } configs["security.cluster-ssl-ca"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, TLSCACert, ) configs["security.cluster-ssl-cert"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["security.cluster-ssl-key"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // drainer tls config list tlsConfigs := []string{ "security.cluster-ssl-ca", "security.cluster-ssl-cert", "security.cluster-ssl-key", } // delete TLS configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } // ScaleConfig deploy temporary config on scaling func (i *TiDBInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } func mustBeClusterTopo(topo Topology) *Specification { spec, ok := topo.(*Specification) if !ok { panic("must be cluster spec") } return spec } tiup-1.16.3/pkg/cluster/spec/tiflash.go000066400000000000000000000731711505422223000177440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io" "net/http" "os" "path/filepath" "sort" "strings" "time" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/scripts" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" "gopkg.in/yaml.v3" ) // TiFlashSpec represents the TiFlash topology specification in topology.yaml type TiFlashSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` TCPPort int `yaml:"tcp_port" default:"9000"` HTTPPort int `yaml:"http_port" default:"8123"` // Deprecated since v7.1.0 FlashServicePort int `yaml:"flash_service_port" default:"3930"` FlashProxyPort int `yaml:"flash_proxy_port" default:"20170"` FlashProxyStatusPort int `yaml:"flash_proxy_status_port" default:"20292"` StatusPort int `yaml:"metrics_port" default:"8234"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty" validate:"data_dir:expandable"` LogDir string `yaml:"log_dir,omitempty"` TmpDir string `yaml:"tmp_path,omitempty"` Offline bool `yaml:"offline,omitempty"` Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` NumaCores 
string `yaml:"numa_cores,omitempty" validate:"numa_cores:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` LearnerConfig map[string]any `yaml:"learner_config,omitempty" validate:"learner_config:ignore"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` } // Status queries current status of the instance func (s *TiFlashSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string { storeAddr := utils.JoinHostPort(s.Host, s.FlashServicePort) state := checkStoreStatus(ctx, storeAddr, tlsCfg, pdList...) if s.Offline && strings.ToLower(state) == "offline" { state = "Pending Offline" // avoid misleading } return state } const ( // EngineLabelKey is the label that indicates the backend of store instance: // tikv or tiflash. TiFlash instance will contain a label of 'engine: tiflash'. EngineLabelKey = "engine" // EngineLabelTiFlash is the label value, which a TiFlash instance will have with // a label key of EngineLabelKey. EngineLabelTiFlash = "tiflash" // EngineLabelTiFlashCompute is for disaggregated tiflash mode, // it's the label of tiflash_compute nodes. EngineLabelTiFlashCompute = "tiflash_compute" // EngineRoleLabelKey is the label that indicates if the TiFlash instance is a write node. EngineRoleLabelKey = "engine_role" // EngineRoleLabelWrite is for disaggregated tiflash write node. EngineRoleLabelWrite = "write" ) // GetExtendedRole get extended name for TiFlash to distinguish disaggregated mode. 
func (s *TiFlashSpec) GetExtendedRole(ctx context.Context, tlsCfg *tls.Config, pdList ...string) string { if len(pdList) < 1 { return "" } storeAddr := utils.JoinHostPort(s.Host, s.FlashServicePort) pdapi := api.NewPDClient(ctx, pdList, statusQueryTimeout, tlsCfg) store, err := pdapi.GetCurrentStore(storeAddr) if err != nil { return "" } isWriteNode := false isTiFlash := false for _, label := range store.Store.Labels { if label.Key == EngineLabelKey { if label.Value == EngineLabelTiFlashCompute { return " (compute)" } if label.Value == EngineLabelTiFlash { isTiFlash = true } } if label.Key == EngineRoleLabelKey && label.Value == EngineRoleLabelWrite { isWriteNode = true } if isTiFlash && isWriteNode { return " (write)" } } return "" } // Role returns the component role of the instance func (s *TiFlashSpec) Role() string { return ComponentTiFlash } // SSH returns the host and SSH port of the instance func (s *TiFlashSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *TiFlashSpec) GetMainPort() int { return s.TCPPort } // GetManageHost returns the manage host of the instance func (s *TiFlashSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *TiFlashSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *TiFlashSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // key names for storage config const ( TiFlashStorageKeyMainDirs string = "storage.main.dir" TiFlashStorageKeyLatestDirs string = "storage.latest.dir" TiFlashStorageKeyRaftDirs string = "storage.raft.dir" TiFlashRemoteCacheDir string = "storage.remote.cache.dir" TiFlashRequiredCPUFlags string = "avx2 popcnt movbe" ) // GetOverrideDataDir returns the data dir. 
// If users have defined TiFlashStorageKeyMainDirs, then override "DataDir" with // the directories defined in TiFlashStorageKeyMainDirs and TiFlashStorageKeyLatestDirs func (s *TiFlashSpec) GetOverrideDataDir() (string, error) { getStrings := func(key string) []string { var strs []string if dirsVal, ok := s.Config[key]; ok { if dirs, ok := dirsVal.([]any); ok && len(dirs) > 0 { for _, elem := range dirs { if elemStr, ok := elem.(string); ok { elemStr := strings.TrimSuffix(strings.TrimSpace(elemStr), "/") strs = append(strs, elemStr) } } } } return strs } mainDirs := getStrings(TiFlashStorageKeyMainDirs) latestDirs := getStrings(TiFlashStorageKeyLatestDirs) if len(mainDirs) == 0 && len(latestDirs) == 0 { return s.DataDir, nil } // If storage is defined, the path defined in "data_dir" will be ignored // check whether the directories is uniq in the same configuration item // and make the dirSet uniq checkAbsolute := func(d, host, key string) error { if !strings.HasPrefix(d, "/") { return fmt.Errorf("directory '%s' should be an absolute path in 'tiflash_servers:%s.config.%s'", d, host, key) } return nil } dirSet := set.NewStringSet() for _, d := range latestDirs { if err := checkAbsolute(d, s.Host, TiFlashStorageKeyLatestDirs); err != nil { return "", err } if dirSet.Exist(d) { return "", &meta.ValidateErr{ Type: meta.TypeConflict, Target: "directory", LHS: fmt.Sprintf("tiflash_servers:%s.config.%s", s.Host, TiFlashStorageKeyLatestDirs), RHS: fmt.Sprintf("tiflash_servers:%s.config.%s", s.Host, TiFlashStorageKeyLatestDirs), Value: d, } } dirSet.Insert(d) } mainDirSet := set.NewStringSet() for _, d := range mainDirs { if err := checkAbsolute(d, s.Host, TiFlashStorageKeyMainDirs); err != nil { return "", err } if mainDirSet.Exist(d) { return "", &meta.ValidateErr{ Type: meta.TypeConflict, Target: "directory", LHS: fmt.Sprintf("tiflash_servers:%s.config.%s", s.Host, TiFlashStorageKeyMainDirs), RHS: fmt.Sprintf("tiflash_servers:%s.config.%s", s.Host, 
TiFlashStorageKeyMainDirs), Value: d, } } mainDirSet.Insert(d) dirSet.Insert(d) } // keep the firstPath var firstPath string if len(latestDirs) != 0 { firstPath = latestDirs[0] } else { firstPath = mainDirs[0] } dirSet.Remove(firstPath) // join (stable sorted) paths with "," keys := make([]string, len(dirSet)) i := 0 for k := range dirSet { keys[i] = k i++ } sort.Strings(keys) joinedPaths := firstPath if len(keys) > 0 { joinedPaths += "," + strings.Join(keys, ",") } return joinedPaths, nil } // TiFlashComponent represents TiFlash component. type TiFlashComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiFlashComponent) Name() string { return ComponentTiFlash } // Role implements Component interface. func (c *TiFlashComponent) Role() string { return ComponentTiFlash } // Source implements Component interface. func (c *TiFlashComponent) Source() string { source := c.Topology.ComponentSources.TiFlash if source != "" { return source } return ComponentTiFlash } // CalculateVersion implements the Component interface func (c *TiFlashComponent) CalculateVersion(clusterVersion string) string { version := c.Topology.ComponentVersions.TiFlash if version == "" { version = clusterVersion } return version } // SetVersion implements Component interface. func (c *TiFlashComponent) SetVersion(version string) { c.Topology.ComponentVersions.TiFlash = version } // Instances implements Component interface. 
func (c *TiFlashComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.TiFlashServers)) for _, s := range c.Topology.TiFlashServers { tiflashInstance := &TiFlashInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.GetMainPort(), SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: s.NumaCores, Ports: []int{ s.TCPPort, s.FlashServicePort, s.FlashProxyPort, s.FlashProxyStatusPort, s.StatusPort, }, Dirs: []string{ s.DeployDir, s.DataDir, }, StatusFn: s.Status, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.StatusPort, timeout, tlsCfg) }, Component: c, }, c.Topology} // For 7.1.0 or later, TiFlash HTTP service is removed, so we don't need to set http_port if !tidbver.TiFlashNotNeedHTTPPortConfig(c.Topology.ComponentVersions.TiFlash) { tiflashInstance.Ports = append(tiflashInstance.Ports, s.HTTPPort) } ins = append(ins, tiflashInstance) } return ins } // TiFlashInstance represent the TiFlash instance type TiFlashInstance struct { BaseInstance topo Topology } // GetServicePort returns the service port of TiFlash func (i *TiFlashInstance) GetServicePort() int { return i.InstanceSpec.(*TiFlashSpec).FlashServicePort } // GetStatusPort returns the status port of TiFlash func (i *TiFlashInstance) GetStatusPort() int { return i.InstanceSpec.(*TiFlashSpec).FlashProxyStatusPort } // checkIncorrectKey checks TiFlash's key should not be set in config func (i *TiFlashInstance) checkIncorrectKey(key string) error { errMsg := "NOTE: TiFlash `%s` should NOT be set in topo's \"%s\" config, its value will be ignored, you should set `data_dir` in each host instead, please check your topology" if dir, ok := i.InstanceSpec.(*TiFlashSpec).Config[key].(string); ok && dir != "" { return fmt.Errorf(errMsg, key, "host") } if dir, ok := 
i.topo.(*Specification).ServerConfigs.TiFlash[key].(string); ok && dir != "" { return fmt.Errorf(errMsg, key, "server_configs") } return nil } // checkIncorrectServerConfigs checks TiFlash's key should not be set in server_config func (i *TiFlashInstance) checkIncorrectServerConfigs(key string) error { errMsg := "NOTE: TiFlash `%[1]s` should NOT be set in topo's \"%[2]s\" config, you should set `%[1]s` in each host instead, please check your topology" if _, ok := i.topo.(*Specification).ServerConfigs.TiFlash[key]; ok { return fmt.Errorf(errMsg, key, "server_configs") } return nil } // isValidStringArray detect the key in `config` is valid or not. // The configuration is valid only the key-value is defined, and the // value is a non-empty string array. // Return (key is defined or not, the value is valid or not) func isValidStringArray(key string, config map[string]any, couldEmpty bool) (bool, error) { var ( dirsVal any isKeyDefined bool isAllElemsString = true ) if dirsVal, isKeyDefined = config[key]; !isKeyDefined { return isKeyDefined, nil } if dirs, ok := dirsVal.([]any); ok && (couldEmpty || len(dirs) > 0) { // ensure dirs is non-empty string array for _, elem := range dirs { if _, ok := elem.(string); !ok { isAllElemsString = false break } } if isAllElemsString { return isKeyDefined, nil } } return isKeyDefined, fmt.Errorf("'%s' should be a non-empty string array, please check the tiflash configuration in your yaml file", TiFlashStorageKeyMainDirs) } // checkTiFlashStorageConfig ensures `storage.main` is defined when // `storage.latest` or `storage.raft` is used. 
func checkTiFlashStorageConfig(config map[string]any) (bool, error) { isMainStorageDefined, err := isValidStringArray(TiFlashStorageKeyMainDirs, config, false) if err != nil { return false, err } if !isMainStorageDefined { for k := range config { if strings.HasPrefix(k, "storage.latest") || strings.HasPrefix(k, "storage.raft") { return false, fmt.Errorf("you must set '%s' before setting '%s', please check the tiflash configuration in your yaml file", TiFlashStorageKeyMainDirs, k) } } } return isMainStorageDefined, nil } // CheckIncorrectConfigs checks incorrect settings func (i *TiFlashInstance) CheckIncorrectConfigs() error { // data_dir / path should not be set in config if err := i.checkIncorrectKey("data_dir"); err != nil { return err } if err := i.checkIncorrectKey("path"); err != nil { return err } // storage.main/latest/raft.dir should not be set in server_config if err := i.checkIncorrectServerConfigs(TiFlashStorageKeyMainDirs); err != nil { return err } if err := i.checkIncorrectServerConfigs(TiFlashStorageKeyLatestDirs); err != nil { return err } if err := i.checkIncorrectServerConfigs(TiFlashStorageKeyRaftDirs); err != nil { return err } // storage.* in instance level if _, err := checkTiFlashStorageConfig(i.InstanceSpec.(*TiFlashSpec).Config); err != nil { return err } // no matter storage.latest.dir is defined or not, return err _, err := isValidStringArray(TiFlashStorageKeyLatestDirs, i.InstanceSpec.(*TiFlashSpec).Config, true) return err } // need to check the configuration after clusterVersion >= v4.0.9. 
func checkTiFlashStorageConfigWithVersion(clusterVersion string, config map[string]any) (bool, error) { if tidbver.TiFlashSupportMultiDisksDeployment(clusterVersion) { return checkTiFlashStorageConfig(config) } return false, nil } // InitTiFlashConfig initializes TiFlash config file with the configurations in server_configs func (i *TiFlashInstance) initTiFlashConfig(ctx context.Context, version string, src map[string]any, paths meta.DirPaths) (map[string]any, error) { var ( pathConfig string isStorageDirsDefined bool deprecatedUsersConfig string daemonConfig string markCacheSize string err error ) if isStorageDirsDefined, err = checkTiFlashStorageConfigWithVersion(version, src); err != nil { return nil, err } // For backward compatibility, we need to rollback to set 'path' if isStorageDirsDefined { pathConfig = "#" } else { pathConfig = fmt.Sprintf(`path: "%s"`, strings.Join(paths.Data, ",")) } if tidbver.TiFlashDeprecatedUsersConfig(version) { // For v4.0.12 or later, 5.0.0 or later, TiFlash can ignore these `user.*`, `quotas.*` settings deprecatedUsersConfig = "#" } else { // These settings is required when the version is earlier than v4.0.12 and v5.0.0 deprecatedUsersConfig = ` quotas.default.interval.duration: 3600 quotas.default.interval.errors: 0 quotas.default.interval.execution_time: 0 quotas.default.interval.queries: 0 quotas.default.interval.read_rows: 0 quotas.default.interval.result_rows: 0 users.default.password: "" users.default.profile: "default" users.default.quota: "default" users.default.networks.ip: "::/0" users.readonly.password: "" users.readonly.profile: "readonly" users.readonly.quota: "default" users.readonly.networks.ip: "::/0" profiles.default.load_balancing: "random" profiles.default.use_uncompressed_cache: 0 profiles.readonly.readonly: 1 ` } tidbStatusAddrs := []string{} for _, tidb := range i.topo.(*Specification).TiDBServers { tidbStatusAddrs = append(tidbStatusAddrs, utils.JoinHostPort(tidb.Host, tidb.StatusPort)) } spec := 
i.InstanceSpec.(*TiFlashSpec) enableTLS := i.topo.(*Specification).GlobalOptions.TLSEnabled httpPort := "#" // For 7.1.0 or later, TiFlash HTTP service is removed, so we don't need to set http_port if !tidbver.TiFlashNotNeedHTTPPortConfig(version) { if enableTLS { httpPort = fmt.Sprintf(`https_port: %d`, spec.HTTPPort) } else { httpPort = fmt.Sprintf(`http_port: %d`, spec.HTTPPort) } } tcpPort := "#" // Config tcp_port is only required for TiFlash version < 7.1.0, and is recommended to not specify for TiFlash version >= 7.1.0. if tidbver.TiFlashRequiresTCPPortConfig(version) { tcpPort = fmt.Sprintf(`tcp_port: %d`, spec.TCPPort) } // set TLS configs spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths) if err != nil { return nil, err } topo := Specification{} if tidbver.TiFlashNotNeedSomeConfig(version) { // For 5.4.0 or later, TiFlash can ignore application.runAsDaemon and mark_cache_size setting daemonConfig = "#" markCacheSize = "#" } else { daemonConfig = `application.runAsDaemon: true` markCacheSize = `mark_cache_size: 5368709120` } err = yaml.Unmarshal(fmt.Appendf(nil, ` server_configs: tiflash: default_profile: "default" display_name: "TiFlash" listen_host: "%[7]s" tmp_path: "%[11]s" %[1]s %[3]s %[4]s flash.tidb_status_addr: "%[5]s" flash.service_addr: "%[6]s" flash.flash_cluster.cluster_manager_path: "%[10]s/bin/tiflash/flash_cluster_manager" flash.flash_cluster.log: "%[2]s/tiflash_cluster_manager.log" flash.flash_cluster.master_ttl: 60 flash.flash_cluster.refresh_interval: 20 flash.flash_cluster.update_rule_interval: 5 flash.proxy.config: "%[10]s/conf/tiflash-learner.toml" status.metrics_port: %[8]d logger.errorlog: "%[2]s/tiflash_error.log" logger.log: "%[2]s/tiflash.log" logger.count: 20 logger.size: "1000M" %[13]s raft.pd_addr: "%[9]s" %[12]s %[14]s `, pathConfig, paths.Log, tcpPort, httpPort, strings.Join(tidbStatusAddrs, ","), utils.JoinHostPort(spec.Host, spec.FlashServicePort), i.GetListenHost(), spec.StatusPort, 
strings.Join(i.topo.(*Specification).GetPDList(), ","), paths.Deploy, fmt.Sprintf("%s/tmp", paths.Data[0]), deprecatedUsersConfig, daemonConfig, markCacheSize, ), &topo) if err != nil { return nil, err } conf := MergeConfig(topo.ServerConfigs.TiFlash, spec.Config, src) return conf, nil } func (i *TiFlashInstance) mergeTiFlashInstanceConfig(clusterVersion string, globalConf, instanceConf map[string]any) (map[string]any, error) { var ( isStorageDirsDefined bool err error conf map[string]any ) if isStorageDirsDefined, err = checkTiFlashStorageConfigWithVersion(clusterVersion, instanceConf); err != nil { return nil, err } if isStorageDirsDefined { delete(globalConf, "path") } conf = MergeConfig(globalConf, instanceConf) return conf, nil } // InitTiFlashLearnerConfig initializes TiFlash learner config file func (i *TiFlashInstance) InitTiFlashLearnerConfig(ctx context.Context, clusterVersion string, src map[string]any, paths meta.DirPaths) (map[string]any, error) { spec := i.InstanceSpec.(*TiFlashSpec) topo := Specification{} var statusAddr string if tidbver.TiFlashSupportAdvertiseStatusAddr(clusterVersion) { statusAddr = fmt.Sprintf(`server.status-addr: "%s" server.advertise-status-addr: "%s"`, utils.JoinHostPort(i.GetListenHost(), spec.FlashProxyStatusPort), utils.JoinHostPort(spec.Host, spec.FlashProxyStatusPort)) } else { statusAddr = fmt.Sprintf(`server.status-addr: "%s"`, utils.JoinHostPort(spec.Host, spec.FlashProxyStatusPort)) } err := yaml.Unmarshal(fmt.Appendf(nil, ` server_configs: tiflash-learner: log-file: "%[1]s/tiflash_tikv.log" server.engine-addr: "%[2]s" server.addr: "%[3]s" server.advertise-addr: "%[4]s" %[5]s storage.data-dir: "%[6]s/flash" rocksdb.wal-dir: "" security.ca-path: "" security.cert-path: "" security.key-path: "" # Normally the number of TiFlash nodes is smaller than TiKV nodes, and we need more raft threads to match the write speed of TiKV. 
raftstore.apply-pool-size: 4 raftstore.store-pool-size: 4 `, paths.Log, utils.JoinHostPort(spec.Host, spec.FlashServicePort), utils.JoinHostPort(i.GetListenHost(), spec.FlashProxyPort), utils.JoinHostPort(spec.Host, spec.FlashProxyPort), statusAddr, paths.Data[0], ), &topo) if err != nil { return nil, err } enableTLS := i.topo.(*Specification).GlobalOptions.TLSEnabled // set TLS configs spec.LearnerConfig, err = i.setTLSConfigWithTiFlashLearner(enableTLS, spec.LearnerConfig, paths) if err != nil { return nil, err } conf := MergeConfig(topo.ServerConfigs.TiFlashLearner, spec.LearnerConfig, src) return conf, nil } // setTLSConfigWithTiFlashLearner set TLS Config to support enable/disable TLS func (i *TiFlashInstance) setTLSConfigWithTiFlashLearner(enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { if enableTLS { if configs == nil { configs = make(map[string]any) } configs["security.ca-path"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, TLSCACert, ) configs["security.cert-path"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["security.key-path"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // drainer tls config list tlsConfigs := []string{ "security.ca-path", "security.cert-path", "security.key-path", } // delete TLS configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } // setTLSConfig set TLS Config to support enable/disable TLS func (i *TiFlashInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { if enableTLS { if configs == nil { configs = make(map[string]any) } configs["security.ca_path"] = fmt.Sprintf( "%s/tls/%s", paths.Deploy, TLSCACert, ) configs["security.cert_path"] = fmt.Sprintf( "%s/tls/%s.crt", paths.Deploy, i.Role()) configs["security.key_path"] = fmt.Sprintf( "%s/tls/%s.pem", paths.Deploy, i.Role()) } else { // drainer tls config list tlsConfigs := 
[]string{ "security.ca_path", "security.cert_path", "security.key_path", } // delete TLS configs if configs != nil { for _, config := range tlsConfigs { delete(configs, config) } } } return configs, nil } // getTiFlashRequiredCPUFlagsWithVersion return required CPU flags for TiFlash by given version func getTiFlashRequiredCPUFlagsWithVersion(clusterVersion string, arch string) string { arch = strings.ToLower(arch) if arch == "x86_64" || arch == "amd64" { if tidbver.TiFlashRequireCPUFlagAVX2(clusterVersion) { return TiFlashRequiredCPUFlags } } return "" } // InitConfig implement Instance interface func (i *TiFlashInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } spec := i.InstanceSpec.(*TiFlashSpec) version := i.CalculateVersion(clusterVersion) cfg := &scripts.TiFlashScript{ RequiredCPUFlags: getTiFlashRequiredCPUFlagsWithVersion(version, spec.Arch), DeployDir: paths.Deploy, LogDir: paths.Log, NumaNode: spec.NumaNode, NumaCores: spec.NumaCores, } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tiflash_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_tiflash.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } conf, err := i.InitTiFlashLearnerConfig(ctx, version, topo.ServerConfigs.TiFlashLearner, paths) if err != nil { return err } // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( clusterName, AnsibleImportedConfigPath, fmt.Sprintf( "%s-learner-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort(), ), ) importConfig, err := os.ReadFile(configPath) if err != nil { return err } conf, err = 
mergeImported(importConfig, conf) if err != nil { return err } } err = i.mergeTiFlashLearnerServerConfig(ctx, e, conf, spec.LearnerConfig, paths) if err != nil { return err } // Init the configuration using cfg and server_configs if conf, err = i.initTiFlashConfig(ctx, version, topo.ServerConfigs.TiFlash, paths); err != nil { return err } // merge config files for imported instance if i.IsImported() { configPath := ClusterPath( clusterName, AnsibleImportedConfigPath, fmt.Sprintf( "%s-%s-%d.toml", i.ComponentName(), i.GetHost(), i.GetPort(), ), ) importConfig, err := os.ReadFile(configPath) if err != nil { return err } // TODO: maybe we also need to check the imported config? // if _, err = checkTiFlashStorageConfigWithVersion(clusterVersion, importConfig); err != nil { // return err // } conf, err = mergeImported(importConfig, conf) if err != nil { return err } } // Check the configuration of instance level if conf, err = i.mergeTiFlashInstanceConfig(version, conf, spec.Config); err != nil { return err } return i.MergeServerConfig(ctx, e, conf, nil, paths) } // ScaleConfig deploy temporary config on scaling func (i *TiFlashInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } type replicateConfig struct { EnablePlacementRules string `json:"enable-placement-rules"` } // PrepareStart checks TiFlash requirements before starting func (i *TiFlashInstance) PrepareStart(ctx context.Context, tlsCfg *tls.Config) error { // set enable-placement-rules to true via PDClient enablePlacementRules, err := json.Marshal(replicateConfig{ EnablePlacementRules: "true", }) // this should not failed, else exit if err != nil { return perrs.Annotate(err, "failed to marshal replicate config") } var topo Topology if topoVal := 
ctx.Value(ctxt.CtxBaseTopo); topoVal != nil { // in scale-out phase var ok bool topo, ok = topoVal.(Topology) if !ok { return perrs.New("base topology in context is invalid") } } else { // in start phase topo = i.topo } endpoints := topo.(*Specification).GetPDListWithManageHost() pdClient := api.NewPDClient(ctx, endpoints, 10*time.Second, tlsCfg) return pdClient.UpdateReplicateConfig(bytes.NewBuffer(enablePlacementRules)) } // Ready implements Instance interface func (i *TiFlashInstance) Ready(ctx context.Context, e ctxt.Executor, timeout uint64, tlsCfg *tls.Config) error { // FIXME: the timeout is applied twice in the whole `Ready()` process, in the worst // case it might wait double time as other components if err := PortStarted(ctx, e, i.GetServicePort(), timeout); err != nil { return err } scheme := "http" if i.topo.BaseTopo().GlobalOptions.TLSEnabled { scheme = "https" } addr := fmt.Sprintf("%s://%s/tiflash/store-status", scheme, utils.JoinHostPort(i.GetManageHost(), i.GetStatusPort())) req, err := http.NewRequest("GET", addr, nil) if err != nil { return err } req = req.WithContext(ctx) retryOpt := utils.RetryOption{ Delay: time.Second, Timeout: time.Second * time.Duration(timeout), } var queryErr error if err := utils.Retry(func() error { client := utils.NewHTTPClient(statusQueryTimeout, tlsCfg) res, err := client.Client().Do(req) if err != nil { queryErr = err return err } defer res.Body.Close() body, err := io.ReadAll(res.Body) if err != nil { queryErr = err return err } if res.StatusCode == http.StatusNotFound || string(body) == "Running" { return nil } err = fmt.Errorf("tiflash store status is '%s', not fully running yet", string(body)) queryErr = err return err }, retryOpt); err != nil { return perrs.Annotatef(queryErr, "timed out waiting for tiflash %s:%d to be ready after %ds", i.Host, i.Port, timeout) } return nil } tiup-1.16.3/pkg/cluster/spec/tikv.go000066400000000000000000000420321505422223000172570ustar00rootroot00000000000000// Copyright 2020 
PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "context" "crypto/tls" "errors" "fmt" "net/http" "net/url" "os" "path/filepath" "strconv" "strings" "time" perrs "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/api" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/template/scripts" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" dto "github.com/prometheus/client_model/go" "github.com/prometheus/prom2json" "go.uber.org/zap" ) const ( metricNameRegionCount = "tikv_raftstore_region_count" labelNameLeaderCount = "leader" ) // TiKVSpec represents the TiKV topology specification in topology.yaml type TiKVSpec struct { Host string `yaml:"host"` ManageHost string `yaml:"manage_host,omitempty" validate:"manage_host:editable"` ListenHost string `yaml:"listen_host,omitempty"` AdvertiseAddr string `yaml:"advertise_addr,omitempty"` SSHPort int `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"` Imported bool `yaml:"imported,omitempty"` Patched bool `yaml:"patched,omitempty"` IgnoreExporter bool `yaml:"ignore_exporter,omitempty"` Port int `yaml:"port" default:"20160"` StatusPort int `yaml:"status_port" default:"20180"` AdvertiseStatusAddr string `yaml:"advertise_status_addr,omitempty"` DeployDir string `yaml:"deploy_dir,omitempty"` DataDir string `yaml:"data_dir,omitempty"` LogDir string `yaml:"log_dir,omitempty"` Offline bool `yaml:"offline,omitempty"` 
Source string `yaml:"source,omitempty" validate:"source:editable"` NumaNode string `yaml:"numa_node,omitempty" validate:"numa_node:editable"` NumaCores string `yaml:"numa_cores,omitempty" validate:"numa_cores:editable"` Config map[string]any `yaml:"config,omitempty" validate:"config:ignore"` ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"` Arch string `yaml:"arch,omitempty"` OS string `yaml:"os,omitempty"` } // checkStoreStatus checks the store status in current cluster func checkStoreStatus(ctx context.Context, storeAddr string, tlsCfg *tls.Config, pdList ...string) string { if len(pdList) < 1 { return "N/A" } pdapi := api.NewPDClient(ctx, pdList, statusQueryTimeout, tlsCfg) store, err := pdapi.GetCurrentStore(storeAddr) if err != nil { if errors.Is(err, api.ErrNoStore) { return "N/A" } return "Down" } return store.Store.StateName } // Status queries current status of the instance func (s *TiKVSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string { storeAddr := addr(s) state := checkStoreStatus(ctx, storeAddr, tlsCfg, pdList...) 
if s.Offline && strings.ToLower(state) == "offline" { state = "Pending Offline" // avoid misleading } return state } // Role returns the component role of the instance func (s *TiKVSpec) Role() string { return ComponentTiKV } // SSH returns the host and SSH port of the instance func (s *TiKVSpec) SSH() (string, int) { host := s.Host if s.ManageHost != "" { host = s.ManageHost } return host, s.SSHPort } // GetMainPort returns the main port of the instance func (s *TiKVSpec) GetMainPort() int { return s.Port } // GetManageHost returns the manage host of the instance func (s *TiKVSpec) GetManageHost() string { if s.ManageHost != "" { return s.ManageHost } return s.Host } // IsImported returns if the node is imported from TiDB-Ansible func (s *TiKVSpec) IsImported() bool { return s.Imported } // IgnoreMonitorAgent returns if the node does not have monitor agents available func (s *TiKVSpec) IgnoreMonitorAgent() bool { return s.IgnoreExporter } // Labels returns the labels of TiKV func (s *TiKVSpec) Labels() (map[string]string, error) { lbs := make(map[string]string) if serverLabels := GetValueFromPath(s.Config, "server.labels"); serverLabels != nil { m := map[any]any{} if sm, ok := serverLabels.(map[string]any); ok { for k, v := range sm { m[k] = v } } else if im, ok := serverLabels.(map[any]any); ok { m = im } for k, v := range m { key, ok := k.(string) if !ok { return nil, perrs.Errorf("TiKV label name %v is not a string, check the instance: %s:%d", k, s.Host, s.GetMainPort()) } value, ok := v.(string) if !ok { return nil, perrs.Errorf("TiKV label value %v is not a string, check the instance: %s:%d", v, s.Host, s.GetMainPort()) } lbs[key] = value } } return lbs, nil } // TiKVComponent represents TiKV component. type TiKVComponent struct{ Topology *Specification } // Name implements Component interface. func (c *TiKVComponent) Name() string { return ComponentTiKV } // Role implements Component interface. 
func (c *TiKVComponent) Role() string {
	return ComponentTiKV
}

// Source implements Component interface.
// It returns the configured component source, defaulting to the component name.
func (c *TiKVComponent) Source() string {
	source := c.Topology.ComponentSources.TiKV
	if source != "" {
		return source
	}
	return ComponentTiKV
}

// CalculateVersion implements the Component interface.
// A per-component version overrides the cluster version when set.
func (c *TiKVComponent) CalculateVersion(clusterVersion string) string {
	version := c.Topology.ComponentVersions.TiKV
	if version == "" {
		version = clusterVersion
	}
	return version
}

// SetVersion implements Component interface.
func (c *TiKVComponent) SetVersion(version string) {
	c.Topology.ComponentVersions.TiKV = version
}

// Instances implements Component interface.
// One TiKVInstance is built per entry in TiKVServers; the instance-level
// listen_host overrides the global one when present.
func (c *TiKVComponent) Instances() []Instance {
	ins := make([]Instance, 0, len(c.Topology.TiKVServers))
	for _, s := range c.Topology.TiKVServers {
		ins = append(ins, &TiKVInstance{BaseInstance{
			InstanceSpec: s,
			Name:         c.Name(),
			Host:         s.Host,
			ManageHost:   s.ManageHost,
			ListenHost:   utils.Ternary(s.ListenHost != "", s.ListenHost, c.Topology.BaseTopo().GlobalOptions.ListenHost).(string),
			Port:         s.Port,
			SSHP:         s.SSHPort,
			Source:       s.Source,
			NumaNode:     s.NumaNode,
			NumaCores:    s.NumaCores,

			Ports: []int{
				s.Port,
				s.StatusPort,
			},
			Dirs: []string{
				s.DeployDir,
				s.DataDir,
			},
			StatusFn: s.Status,
			UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
				return UptimeByHost(s.GetManageHost(), s.StatusPort, timeout, tlsCfg)
			},
			Component: c,
		}, c.Topology, 0})
	}
	return ins
}

// TiKVInstance represent the TiDB instance
type TiKVInstance struct {
	BaseInstance
	topo Topology
	// leaderCountBeforeRestart records the region-leader count sampled in
	// PreRestart so PostRestart can wait for leaders to transfer back.
	leaderCountBeforeRestart int
}

// InitConfig implement Instance interface.
// It renders the run_tikv.sh startup script, uploads it, merges global,
// imported (TiDB-Ansible) and instance-level configs, applies TLS settings,
// and finally validates the merged config against the TiKV binary.
func (i *TiKVInstance) InitConfig(
	ctx context.Context,
	e ctxt.Executor,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	topo := i.topo.(*Specification)
	if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil {
		return err
	}
	enableTLS := topo.GlobalOptions.TLSEnabled
	spec := i.InstanceSpec.(*TiKVSpec)

	pds := []string{}
	for _, pdspec := range topo.PDServers {
		pds = append(pds, utils.JoinHostPort(pdspec.Host, pdspec.ClientPort))
	}
	cfg := &scripts.TiKVScript{
		Addr:                       utils.JoinHostPort(i.GetListenHost(), spec.Port),
		AdvertiseAddr:              utils.Ternary(spec.AdvertiseAddr != "", spec.AdvertiseAddr, utils.JoinHostPort(spec.Host, spec.Port)).(string),
		StatusAddr:                 utils.JoinHostPort(i.GetListenHost(), spec.StatusPort),
		SupportAdvertiseStatusAddr: tidbver.TiKVSupportAdvertiseStatusAddr(clusterVersion),
		AdvertiseStatusAddr:        utils.Ternary(spec.AdvertiseStatusAddr != "", spec.AdvertiseStatusAddr, utils.JoinHostPort(spec.Host, spec.StatusPort)).(string),
		PD:                         strings.Join(pds, ","),
		DeployDir:                  paths.Deploy,
		DataDir:                    paths.Data[0],
		LogDir:                     paths.Log,
		NumaNode:                   spec.NumaNode,
		NumaCores:                  spec.NumaCores,
	}

	// Render the startup script locally, then push it and make it executable.
	fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tikv_%s_%d.sh", i.GetHost(), i.GetPort()))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}
	dst := filepath.Join(paths.Deploy, "scripts", "run_tikv.sh")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}

	_, _, err := e.Execute(ctx, "chmod +x "+dst, false)
	if err != nil {
		return err
	}

	globalConfig := topo.ServerConfigs.TiKV
	// merge config files for imported instance
	if i.IsImported() {
		configPath := ClusterPath(
			clusterName,
			AnsibleImportedConfigPath,
			fmt.Sprintf(
				"%s-%s-%d.toml",
				i.ComponentName(),
				i.GetHost(),
				i.GetPort(),
			),
		)
		importConfig, err := os.ReadFile(configPath)
		if err != nil {
			return err
		}
		globalConfig, err = mergeImported(importConfig, globalConfig)
		if err != nil {
			return err
		}
	}

	// set TLS configs
	spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths)
	if err != nil {
		return err
	}

	if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil {
		return err
	}

	return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), clusterVersion, i.OS(), i.Arch(), i.ComponentName()+".toml", paths)
}

// setTLSConfig set TLS Config to support enable/disable TLS.
// When enabling, the security.* paths are pointed at the certs deployed under
// <deploy_dir>/tls; when disabling, those keys are removed from the config.
func (i *TiKVInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) {
	if enableTLS {
		if configs == nil {
			configs = make(map[string]any)
		}
		configs["security.ca-path"] = fmt.Sprintf(
			"%s/tls/%s",
			paths.Deploy,
			TLSCACert,
		)
		configs["security.cert-path"] = fmt.Sprintf(
			"%s/tls/%s.crt",
			paths.Deploy,
			i.Role())
		configs["security.key-path"] = fmt.Sprintf(
			"%s/tls/%s.pem",
			paths.Deploy,
			i.Role())
	} else {
		// TLS config keys managed by tiup for this component
		tlsConfigs := []string{
			"security.ca-path",
			"security.cert-path",
			"security.key-path",
		}
		// delete TLS configs
		if configs != nil {
			for _, config := range tlsConfigs {
				delete(configs, config)
			}
		}
	}

	return configs, nil
}

// ScaleConfig deploy temporary config on scaling.
// The instance topology is swapped to the scale-out topology for the duration
// of InitConfig and restored afterwards.
func (i *TiKVInstance) ScaleConfig(
	ctx context.Context,
	e ctxt.Executor,
	topo Topology,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	s := i.topo
	defer func() {
		i.topo = s
	}()
	i.topo = mustBeClusterTopo(topo)
	return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths)
}

var _ RollingUpdateInstance = &TiKVInstance{}

// PreRestart implements RollingUpdateInstance interface.
// Before restarting a TiKV node (when more than one exists) it records the
// current region-leader count and asks PD to evict leaders from this store;
// eviction timeouts are logged and ignored so the restart can proceed.
func (i *TiKVInstance) PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	timeoutOpt := &utils.RetryOption{
		Timeout: time.Second * time.Duration(apiTimeoutSeconds),
		Delay:   time.Second * 2,
	}

	tidbTopo, ok := topo.(*Specification)
	if !ok {
		panic("should be type of tidb topology")
	}

	// Leader eviction is pointless with a single TiKV node.
	if len(tidbTopo.TiKVServers) <= 1 {
		return nil
	}

	pdClient := api.NewPDClient(ctx, tidbTopo.GetPDListWithManageHost(), 5*time.Second, tlsCfg)

	// Make sure there's leader of PD.
	// Although we evict pd leader when restart pd,
	// But when there's only one PD instance the pd might not serve request right away after restart.
	err := pdClient.WaitLeader(timeoutOpt)
	if err != nil {
		return err
	}

	// Get and record the leader count before evict leader.
	leaderCount, err := genLeaderCounter(tidbTopo, tlsCfg)(i.ID())
	if err != nil {
		return perrs.Annotatef(err, "failed to get leader count %s", i.GetHost())
	}
	i.leaderCountBeforeRestart = leaderCount

	if err := pdClient.EvictStoreLeader(addr(i.InstanceSpec.(*TiKVSpec)), timeoutOpt, genLeaderCounter(tidbTopo, tlsCfg)); err != nil {
		if !utils.IsTimeoutOrMaxRetry(err) {
			return perrs.Annotatef(err, "failed to evict store leader %s", i.GetHost())
		}
		// Timeouts/retry-exhaustion are tolerated: proceed with the restart.
		ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger).
			Warnf("Ignore evicting store leader from %s, %v", i.ID(), err)
	}
	return nil
}

// PostRestart implements RollingUpdateInstance interface.
// It removes the evict-leader scheduler added by PreRestart and, when a
// pre-restart leader count was recorded, waits for leaders to transfer back.
func (i *TiKVInstance) PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	tidbTopo, ok := topo.(*Specification)
	if !ok {
		panic("should be type of tidb topology")
	}

	if len(tidbTopo.TiKVServers) <= 1 {
		return nil
	}

	pdClient := api.NewPDClient(ctx, tidbTopo.GetPDListWithManageHost(), 5*time.Second, tlsCfg)

	// remove store leader evict scheduler after restart
	if err := pdClient.RemoveStoreEvict(addr(i.InstanceSpec.(*TiKVSpec))); err != nil {
		return perrs.Annotatef(err, "failed to remove evict store scheduler for %s", i.GetHost())
	}

	if i.leaderCountBeforeRestart > 0 {
		if err := pdClient.RecoverStoreLeader(addr(i.InstanceSpec.(*TiKVSpec)), i.leaderCountBeforeRestart, nil, genLeaderCounter(tidbTopo, tlsCfg)); err != nil {
			if !utils.IsTimeoutOrMaxRetry(err) {
				return perrs.Annotatef(err, "failed to recover store leader %s", i.GetHost())
			}
			// Timeouts/retry-exhaustion are tolerated: leader recovery is best-effort.
			ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger).
				Warnf("Ignore recovering store leader from %s, %v", i.ID(), err)
		}
	}

	return nil
}

// addr returns the address PD uses to identify this TiKV store:
// the advertise address when configured, otherwise host:port.
// NOTE(review): the panic on port 80 (besides 0) looks like a sanity check for
// an uninitialized/placeholder port — confirm the intent before relying on it.
func addr(spec *TiKVSpec) string {
	if spec.AdvertiseAddr != "" {
		return spec.AdvertiseAddr
	}
	if spec.Port == 0 || spec.Port == 80 {
		panic(fmt.Sprintf("invalid TiKV port %d", spec.Port))
	}
	return utils.JoinHostPort(spec.Host, spec.Port)
}

// genLeaderCounter returns a closure that, given a TiKV instance ID
// (host:port), scrapes that instance's Prometheus /metrics endpoint and
// returns its current region-leader count.
func genLeaderCounter(topo *Specification, tlsCfg *tls.Config) func(string) (int, error) {
	return func(id string) (int, error) {
		// Resolve the instance ID to its status (metrics) address.
		statusAddress := ""
		foundIDs := []string{}
		for _, kv := range topo.TiKVServers {
			kvid := utils.JoinHostPort(kv.Host, kv.Port)
			if id == kvid {
				statusAddress = utils.JoinHostPort(kv.GetManageHost(), kv.StatusPort)
				break
			}
			foundIDs = append(foundIDs, kvid)
		}
		if statusAddress == "" {
			return 0, fmt.Errorf("TiKV instance with ID %s not found, found %s", id, strings.Join(foundIDs, ","))
		}

		transport := makeTransport(tlsCfg)

		// Fetch metric families asynchronously; the goroutine closes mfChan
		// (via prom2json) when the scrape completes or fails.
		mfChan := make(chan *dto.MetricFamily, 1024)
		go func() {
			addr := fmt.Sprintf("http://%s/metrics", statusAddress)
			// XXX: https://github.com/tikv/tikv/issues/5340
			// Some TiKV versions don't handle https correctly
			// So we check if it's in that case first
			if tlsCfg != nil && checkHTTPS(fmt.Sprintf("https://%s/metrics", statusAddress), tlsCfg) == nil {
				addr = fmt.Sprintf("https://%s/metrics", statusAddress)
			}
			if err := prom2json.FetchMetricFamilies(addr, mfChan, transport); err != nil {
				zap.L().Error("failed counting leader",
					zap.String("host", id),
					zap.String("status addr", addr),
					zap.Error(err),
				)
			}
		}()

		fms := []*prom2json.Family{}
		for mf := range mfChan {
			fm := prom2json.NewFamily(mf)
			fms = append(fms, fm)
		}
		for _, fm := range fms {
			if fm.Name != metricNameRegionCount {
				continue
			}
			for _, m := range fm.Metrics {
				if m, ok := m.(prom2json.Metric); ok && m.Labels["type"] == labelNameLeaderCount {
					// NOTE(review): prom2json renders values from float64; Atoi
					// would fail on a float/exponent-formatted value — verify the
					// value format, ParseFloat may be safer here.
					return strconv.Atoi(m.Value)
				}
			}
		}
		return 0, perrs.Errorf("metric %s{type=\"%s\"} not found", metricNameRegionCount, labelNameLeaderCount)
	}
}

// makeTransport builds a one-shot HTTP transport: keep-alive disabled,
// header timeout set, TLS config cloned, and proxy taken from
// TIUP_INNER_HTTP_PROXY (preferred) or HTTP_PROXY.
func makeTransport(tlsCfg *tls.Config) *http.Transport {
	// Start with the DefaultTransport for sane defaults.
	transport := http.DefaultTransport.(*http.Transport).Clone()
	// Conservatively disable HTTP keep-alive as this program will only
	// ever need a single HTTP request.
	transport.DisableKeepAlives = true
	// Timeout early if the server doesn't even return the headers.
	transport.ResponseHeaderTimeout = time.Minute
	// We should clone a tlsCfg because we use it across goroutine
	if tlsCfg != nil {
		transport.TLSClientConfig = tlsCfg.Clone()
	}
	// prefer to use the inner http proxy
	httpProxy := os.Getenv("TIUP_INNER_HTTP_PROXY")
	if len(httpProxy) == 0 {
		httpProxy = os.Getenv("HTTP_PROXY")
	}
	if len(httpProxy) > 0 {
		if proxyURL, err := url.Parse(httpProxy); err == nil {
			transport.Proxy = http.ProxyURL(proxyURL)
		}
	}
	return transport
}

// Check if the url works with tlsCfg.
// Any transport-level failure is returned as an error; the HTTP status code
// is not inspected.
func checkHTTPS(url string, tlsCfg *tls.Config) error {
	transport := makeTransport(tlsCfg)

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return perrs.Annotatef(err, "creating GET request for URL %q failed", url)
	}

	client := http.Client{Transport: transport}
	resp, err := client.Do(req)
	if err != nil {
		return perrs.Annotatef(err, "executing GET request for URL %q failed", url)
	}
	resp.Body.Close()
	return nil
}
tiup-1.16.3/pkg/cluster/spec/tikv_cdc.go000066400000000000000000000255411505422223000200760ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package spec

import (
	"context"
	"crypto/tls"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/cluster/api"
	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	logprinter "github.com/pingcap/tiup/pkg/logger/printer"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/tidbver"
	"github.com/pingcap/tiup/pkg/utils"
)

// TiKVCDCSpec represents the TiKVCDC topology specification in topology.yaml
type TiKVCDCSpec struct {
	Host            string               `yaml:"host"`
	ManageHost      string               `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
	SSHPort         int                  `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	Imported        bool                 `yaml:"imported,omitempty"`
	Patched         bool                 `yaml:"patched,omitempty"`
	IgnoreExporter  bool                 `yaml:"ignore_exporter,omitempty"`
	Port            int                  `yaml:"port" default:"8600"`
	DeployDir       string               `yaml:"deploy_dir,omitempty"`
	DataDir         string               `yaml:"data_dir,omitempty"`
	LogDir          string               `yaml:"log_dir,omitempty"`
	Offline         bool                 `yaml:"offline,omitempty"`
	GCTTL           int64                `yaml:"gc-ttl,omitempty" validate:"gc-ttl:editable"`
	TZ              string               `yaml:"tz,omitempty" validate:"tz:editable"`
	Source          string               `yaml:"source,omitempty" validate:"source:editable"`
	NumaNode        string               `yaml:"numa_node,omitempty" validate:"numa_node:editable"`
	Config          map[string]any       `yaml:"config,omitempty" validate:"config:ignore"`
	ResourceControl meta.ResourceControl `yaml:"resource_control,omitempty" validate:"resource_control:editable"`
	Arch            string               `yaml:"arch,omitempty"`
	OS              string               `yaml:"os,omitempty"`
}

// Role returns the component role of the instance
func (s *TiKVCDCSpec) Role() string {
	return ComponentTiKVCDC
}

// SSH returns the host and SSH port of the instance.
// ManageHost, when set, takes precedence over Host.
func (s *TiKVCDCSpec) SSH() (string, int) {
	host := s.Host
	if s.ManageHost != "" {
		host = s.ManageHost
	}
	return host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *TiKVCDCSpec) GetMainPort() int {
	return s.Port
}

// GetManageHost returns the manage host of the instance,
// falling back to Host when no manage_host is configured.
func (s *TiKVCDCSpec) GetManageHost() string {
	if s.ManageHost != "" {
		return s.ManageHost
	}
	return s.Host
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *TiKVCDCSpec) IsImported() bool {
	// TiDB-Ansible do not support TiKV-CDC
	return false
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *TiKVCDCSpec) IgnoreMonitorAgent() bool {
	return s.IgnoreExporter
}

// TiKVCDCComponent represents TiKV-CDC component.
type TiKVCDCComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *TiKVCDCComponent) Name() string {
	return ComponentTiKVCDC
}

// Role implements Component interface.
func (c *TiKVCDCComponent) Role() string {
	return ComponentTiKVCDC
}

// Source implements Component interface.
// It returns the configured component source, defaulting to the component name.
func (c *TiKVCDCComponent) Source() string {
	source := c.Topology.ComponentSources.TiKVCDC
	if source != "" {
		return source
	}
	return ComponentTiKVCDC
}

// CalculateVersion implements the Component interface
func (c *TiKVCDCComponent) CalculateVersion(clusterVersion string) string {
	// always not follow global version, use ""(latest) by default
	version := c.Topology.ComponentVersions.TiKVCDC
	return version
}

// SetVersion implements Component interface.
func (c *TiKVCDCComponent) SetVersion(version string) {
	c.Topology.ComponentVersions.TiKVCDC = version
}

// Instances implements Component interface.
func (c *TiKVCDCComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.TiKVCDCServers)) for _, s := range c.Topology.TiKVCDCServers { instance := &TiKVCDCInstance{BaseInstance{ InstanceSpec: s, Name: c.Name(), Host: s.Host, ManageHost: s.ManageHost, ListenHost: c.Topology.BaseTopo().GlobalOptions.ListenHost, Port: s.Port, SSHP: s.SSHPort, Source: s.Source, NumaNode: s.NumaNode, NumaCores: "", Ports: []int{ s.Port, }, Dirs: []string{ s.DeployDir, }, StatusFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string { return statusByHost(s.GetManageHost(), s.Port, "/status", timeout, tlsCfg) }, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg) }, Component: c, }, c.Topology} if s.DataDir != "" { instance.Dirs = append(instance.Dirs, s.DataDir) } ins = append(ins, instance) } return ins } // TiKVCDCInstance represent the TiKV-CDC instance. type TiKVCDCInstance struct { BaseInstance topo Topology } // ScaleConfig deploy temporary config on scaling func (i *TiKVCDCInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, user string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = mustBeClusterTopo(topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, user, paths) } // InitConfig implements Instance interface. 
func (i *TiKVCDCInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { if !tidbver.TiKVCDCSupportDeploy(clusterVersion) { return errors.New("tikv-cdc only supports cluster version v6.2.0 or later") } topo := i.topo.(*Specification) if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil { return err } enableTLS := topo.GlobalOptions.TLSEnabled spec := i.InstanceSpec.(*TiKVCDCSpec) globalConfig := topo.ServerConfigs.TiKVCDC instanceConfig := spec.Config pds := []string{} for _, pdspec := range topo.PDServers { pds = append(pds, pdspec.GetAdvertiseClientURL(enableTLS)) } cfg := &scripts.TiKVCDCScript{ Addr: utils.JoinHostPort(i.GetListenHost(), spec.Port), AdvertiseAddr: utils.JoinHostPort(spec.Host, spec.Port), PD: strings.Join(pds, ","), GCTTL: spec.GCTTL, TZ: spec.TZ, TLSEnabled: enableTLS, DeployDir: paths.Deploy, LogDir: paths.Log, DataDir: paths.Data[0], NumaNode: spec.NumaNode, } // doesn't work. if _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil { return err } fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tikv-cdc_%s_%d.sh", i.GetHost(), i.GetPort())) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "scripts", "run_tikv-cdc.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } return i.MergeServerConfig(ctx, e, globalConfig, instanceConfig, paths) } // setTLSConfig set TLS Config to support enable/disable TLS func (i *TiKVCDCInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } var _ RollingUpdateInstance = &TiKVCDCInstance{} // PreRestart implements RollingUpdateInstance interface. // All errors are ignored, to trigger hard restart. 
func (i *TiKVCDCInstance) PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	tidbTopo, ok := topo.(*Specification)
	if !ok {
		panic("should be type of tidb topology")
	}

	logger, ok := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	if !ok {
		panic("logger not found")
	}

	address := utils.JoinHostPort(i.GetHost(), i.GetPort())
	// cdc rolling upgrade strategy only works if there are more than 2 captures
	if len(tidbTopo.TiKVCDCServers) <= 1 {
		logger.Debugf("tikv-cdc pre-restart skipped, only one capture in the topology, addr: %s", address)
		return nil
	}

	start := time.Now()
	client := api.NewTiKVCDCOpenAPIClient(ctx, []string{utils.JoinHostPort(i.GetManageHost(), i.GetPort())}, 5*time.Second, tlsCfg)
	captures, err := client.GetAllCaptures()
	if err != nil {
		// Cannot talk to the capture; fall back to a hard restart.
		logger.Debugf("tikv-cdc pre-restart skipped, cannot get all captures, trigger hard restart, addr: %s, elapsed: %+v", address, time.Since(start))
		return nil
	}

	// Locate this instance among the registered captures by advertise address.
	var (
		captureID string
		found     bool
		isOwner   bool
	)
	for _, capture := range captures {
		if address == capture.AdvertiseAddr {
			found = true
			captureID = capture.ID
			isOwner = capture.IsOwner
			break
		}
	}

	// this may happen if the capture crashed right away.
	if !found {
		logger.Debugf("tikv-cdc pre-restart finished, cannot found the capture, trigger hard restart, captureID: %s, addr: %s, elapsed: %+v", captureID, address, time.Since(start))
		return nil
	}

	if isOwner {
		if err := client.ResignOwner(); err != nil {
			// if resign the owner failed, no more need to drain the current capture,
			// since it's not allowed by the cdc.
			// return nil to trigger hard restart.
			logger.Debugf("tikv-cdc pre-restart finished, resign owner failed, trigger hard restart, captureID: %s, addr: %s, elapsed: %+v", captureID, address, time.Since(start))
			return nil
		}
	}

	// TODO: support drain capture to make restart smooth.
	logger.Debugf("tikv-cdc pre-restart success, captureID: %s, addr: %s, elapsed: %+v", captureID, address, time.Since(start))
	return nil
}

// PostRestart implements RollingUpdateInstance interface.
// It only probes capture liveness for logging purposes; errors never fail the
// rolling update.
func (i *TiKVCDCInstance) PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	logger, ok := ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger)
	if !ok {
		panic("logger not found")
	}

	start := time.Now()
	address := utils.JoinHostPort(i.GetHost(), i.GetPort())

	client := api.NewTiKVCDCOpenAPIClient(ctx, []string{utils.JoinHostPort(i.GetManageHost(), i.GetPort())}, 5*time.Second, tlsCfg)
	err := client.IsCaptureAlive()
	if err != nil {
		logger.Debugf("tikv-cdc post-restart finished, get capture status failed, addr: %s, err: %+v, elapsed: %+v", address, err, time.Since(start))
		return nil
	}

	logger.Debugf("tikv-cdc post-restart success, addr: %s, elapsed: %+v", address, time.Since(start))
	return nil
}
tiup-1.16.3/pkg/cluster/spec/tiproxy.go000066400000000000000000000236001505422223000200200ustar00rootroot00000000000000// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package spec

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/utils"
	"github.com/prometheus/common/expfmt"
)

// proxyUptimeByHost queries TiProxy's /api/metrics endpoint and derives the
// process uptime from the process start-time gauge. It returns 0 on any
// fetch/parse failure or when the metric is absent.
func proxyUptimeByHost(host string, port int, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
	if timeout < time.Second {
		timeout = statusQueryTimeout
	}

	scheme := "http"
	if tlsCfg != nil {
		scheme = "https"
	}
	url := fmt.Sprintf("%s://%s/api/metrics", scheme, utils.JoinHostPort(host, port))

	client := utils.NewHTTPClient(timeout, tlsCfg)

	body, err := client.Get(context.TODO(), url)
	if err != nil || body == nil {
		return 0
	}

	var parser expfmt.TextParser
	reader := bytes.NewReader(body)
	mf, err := parser.TextToMetricFamilies(reader)
	if err != nil {
		return 0
	}

	now := time.Now()
	for k, v := range mf {
		if k == promMetricStartTimeSeconds {
			ms := v.GetMetric()
			if len(ms) >= 1 {
				// start time is a unix timestamp (seconds) gauge
				startTime := ms[0].Gauge.GetValue()
				return now.Sub(time.Unix(int64(startTime), 0))
			}
			return 0
		}
	}

	return 0
}

// TiProxySpec represents the TiProxy topology specification in topology.yaml
type TiProxySpec struct {
	Host       string         `yaml:"host"`
	ManageHost string         `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
	SSHPort    int            `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	Port       int            `yaml:"port" default:"6000"`
	StatusPort int            `yaml:"status_port" default:"3080"`
	DeployDir  string         `yaml:"deploy_dir,omitempty"`
	NumaNode   string         `yaml:"numa_node,omitempty" validate:"numa_node:editable"`
	Config     map[string]any `yaml:"config,omitempty" validate:"config:ignore"`
	Arch       string         `yaml:"arch,omitempty"`
	OS         string         `yaml:"os,omitempty"`
}

// Role returns the component role of the instance
func (s *TiProxySpec) Role() string {
	return ComponentTiProxy
}

// SSH returns the host and SSH port of the instance
func (s *TiProxySpec) SSH() (string, int) {
	return s.Host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *TiProxySpec) GetMainPort() int {
	return s.Port
}

// GetManageHost returns the manage host of the instance,
// falling back to Host when no manage_host is configured.
func (s *TiProxySpec) GetManageHost() string {
	if s.ManageHost != "" {
		return s.ManageHost
	}
	return s.Host
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *TiProxySpec) IsImported() bool {
	return false
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *TiProxySpec) IgnoreMonitorAgent() bool {
	return false
}

// TiProxyComponent represents TiProxy component.
type TiProxyComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *TiProxyComponent) Name() string {
	return ComponentTiProxy
}

// Role implements Component interface.
func (c *TiProxyComponent) Role() string {
	return ComponentTiProxy
}

// Source implements Component interface.
func (c *TiProxyComponent) Source() string {
	return ComponentTiProxy
}

// CalculateVersion implements the Component interface
func (c *TiProxyComponent) CalculateVersion(clusterVersion string) string {
	version := c.Topology.ComponentVersions.TiProxy
	if version == "" {
		// always not follow global version
		// because tiproxy version is different from clusterVersion
		// but "nightly" is effective
		if clusterVersion == "nightly" {
			version = clusterVersion
		}
	}
	return version
}

// SetVersion implements Component interface.
func (c *TiProxyComponent) SetVersion(version string) {
	c.Topology.ComponentVersions.TiProxy = version
}

// Instances implements Component interface.
func (c *TiProxyComponent) Instances() []Instance {
	ins := make([]Instance, 0, len(c.Topology.TiProxyServers))
	for _, s := range c.Topology.TiProxyServers {
		instance := &TiProxyInstance{BaseInstance{
			InstanceSpec: s,
			Name:         c.Name(),
			Host:         s.Host,
			ManageHost:   s.ManageHost,
			ListenHost:   c.Topology.BaseTopo().GlobalOptions.ListenHost,
			Port:         s.Port,
			SSHP:         s.SSHPort,
			NumaNode:     s.NumaNode,
			NumaCores:    "",

			Ports: []int{
				s.Port,
				s.StatusPort,
			},
			Dirs: []string{
				s.DeployDir,
			},
			StatusFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string {
				return statusByHost(s.Host, s.StatusPort, "/api/debug/health", timeout, tlsCfg)
			},
			UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
				return proxyUptimeByHost(s.Host, s.StatusPort, timeout, tlsCfg)
			},
			Component: c,
		}, c.Topology}
		ins = append(ins, instance)
	}
	return ins
}

// TiProxyInstance represent the TiProxy instance.
type TiProxyInstance struct {
	BaseInstance
	topo Topology
}

// ScaleConfig deploy temporary config on scaling.
// The instance topology is swapped to the scale-out topology for the duration
// of InitConfig and restored afterwards.
func (i *TiProxyInstance) ScaleConfig(
	ctx context.Context,
	e ctxt.Executor,
	topo Topology,
	clusterName,
	clusterVersion,
	user string,
	paths meta.DirPaths,
) error {
	s := i.topo
	defer func() {
		i.topo = s
	}()
	i.topo = mustBeClusterTopo(topo)
	return i.InitConfig(ctx, e, clusterName, clusterVersion, user, paths)
}

// checkConfig fills the mandatory TiProxy config keys (PD endpoints, listen
// and advertise addresses, API address, log file) into cfg, allocating the
// map when nil, and returns it.
func (i *TiProxyInstance) checkConfig(
	cfg map[string]any,
	paths meta.DirPaths,
) map[string]any {
	topo := i.topo.(*Specification)
	spec := i.InstanceSpec.(*TiProxySpec)

	if cfg == nil {
		cfg = make(map[string]any)
	}

	pds := []string{}
	for _, pdspec := range topo.PDServers {
		pds = append(pds, utils.JoinHostPort(pdspec.Host, pdspec.ClientPort))
	}
	cfg["proxy.pd-addrs"] = strings.Join(pds, ",")
	cfg["proxy.addr"] = utils.JoinHostPort(i.GetListenHost(), i.GetPort())
	cfg["proxy.advertise-addr"] = spec.Host
	cfg["api.addr"] = utils.JoinHostPort(i.GetListenHost(), spec.StatusPort)
	cfg["log.log-file.filename"] = filepath.Join(paths.Log, "tiproxy.log")

	return cfg
}

// InitConfig implements Instance interface.
// It renders the run_tiproxy.sh startup script, uploads it, applies TLS
// settings and merges global and instance-level configs.
func (i *TiProxyInstance) InitConfig(
	ctx context.Context,
	e ctxt.Executor,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	topo := i.topo.(*Specification)
	if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil {
		return err
	}
	spec := i.InstanceSpec.(*TiProxySpec)
	globalConfig := topo.ServerConfigs.TiProxy
	instanceConfig := i.checkConfig(spec.Config, paths)

	cfg := &scripts.TiProxyScript{
		DeployDir: paths.Deploy,
		NumaNode:  spec.NumaNode,
	}

	// Render the startup script locally, then push it and make it executable.
	fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tiproxy_%s_%d.sh", i.GetHost(), i.GetPort()))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}
	dst := filepath.Join(paths.Deploy, "scripts", "run_tiproxy.sh")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}

	if _, _, err := e.Execute(ctx, "chmod +x "+dst, false); err != nil {
		return err
	}

	var err error
	instanceConfig, err = i.setTLSConfig(ctx, topo.GlobalOptions.TLSEnabled, instanceConfig, paths)
	if err != nil {
		return err
	}

	return i.MergeServerConfig(ctx, e, globalConfig, instanceConfig, paths)
}

// setTLSConfig set TLS Config to support enable/disable TLS.
// When enabling, cluster/HTTP/SQL TLS paths are pointed at the certs deployed
// under <deploy_dir>/tls; when disabling, all managed TLS keys are removed.
func (i *TiProxyInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) {
	if configs == nil {
		configs = make(map[string]any)
	}
	if enableTLS {
		configs["security.cluster-tls.ca"] = fmt.Sprintf("%s/tls/%s", paths.Deploy, TLSCACert)
		configs["security.cluster-tls.cert"] = fmt.Sprintf("%s/tls/%s.crt", paths.Deploy, i.Role())
		configs["security.cluster-tls.key"] = fmt.Sprintf("%s/tls/%s.pem", paths.Deploy, i.Role())
		configs["security.server-http-tls.ca"] = fmt.Sprintf("%s/tls/%s", paths.Deploy, TLSCACert)
		configs["security.server-http-tls.cert"] = fmt.Sprintf("%s/tls/%s.crt", paths.Deploy, i.Role())
		configs["security.server-http-tls.key"] = fmt.Sprintf("%s/tls/%s.pem", paths.Deploy, i.Role())
		configs["security.server-http-tls.skip-ca"] = true
		configs["security.sql-tls.ca"] = fmt.Sprintf("%s/tls/%s", paths.Deploy, TLSCACert)
		configs["security.sql-tls.cert"] = fmt.Sprintf("%s/tls/%s.crt", paths.Deploy, i.Role())
		configs["security.sql-tls.key"] = fmt.Sprintf("%s/tls/%s.pem", paths.Deploy, i.Role())
	} else {
		// TLS config keys managed by tiup for this component
		// (security.server-tls.* is included here for cleanup even though the
		// enable branch above does not set it)
		tlsConfigs := []string{
			"security.cluster-tls.ca",
			"security.cluster-tls.cert",
			"security.cluster-tls.key",
			"security.server-tls.ca",
			"security.server-tls.cert",
			"security.server-tls.key",
			"security.server-tls.skip-ca",
			"security.server-http-tls.ca",
			"security.server-http-tls.cert",
			"security.server-http-tls.key",
			"security.server-http-tls.skip-ca",
			"security.sql-tls.ca",
			"security.sql-tls.cert",
			"security.sql-tls.key",
		}
		// delete TLS configs
		for _, config := range tlsConfigs {
			delete(configs, config)
		}
	}

	return configs, nil
}

// GetAddr return the address of this TiProxy instance
func (i *TiProxyInstance) GetAddr() string {
	return utils.JoinHostPort(i.GetHost(), i.GetPort())
}

var _ RollingUpdateInstance = &TiProxyInstance{}

// PreRestart implements RollingUpdateInstance interface.
// TiProxy needs no special handling before a rolling restart.
func (i *TiProxyInstance) PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	return nil
}

// PostRestart implements RollingUpdateInstance interface.
// TiProxy needs no special handling after a rolling restart.
func (i *TiProxyInstance) PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	return nil
}
tiup-1.16.3/pkg/cluster/spec/tispark.go000066400000000000000000000424251505422223000177650ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"context"
	"crypto/tls"
	"fmt"
	"path/filepath"
	"reflect"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/checkpoint"
	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/template/config"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	system "github.com/pingcap/tiup/pkg/cluster/template/systemd"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/utils"
	"go.uber.org/zap"
)

// TiSparkMasterSpec is the topology specification for TiSpark master node
type TiSparkMasterSpec struct {
	Host           string `yaml:"host"`
	ManageHost     string `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
	ListenHost     string `yaml:"listen_host,omitempty"`
	SSHPort        int    `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	Imported       bool   `yaml:"imported,omitempty"`
	Patched        bool   `yaml:"patched,omitempty"`
	IgnoreExporter bool   `yaml:"ignore_exporter,omitempty"`
	// Port is the Spark master RPC port; WebPort serves the Spark web UI.
	Port    int    `yaml:"port" default:"7077"`
	WebPort int    `yaml:"web_port" default:"8080"`
	DeployDir string `yaml:"deploy_dir,omitempty"`
	JavaHome  string `yaml:"java_home,omitempty" validate:"java_home:editable"`
	// SparkConfigs/SparkEnvs are free-form entries rendered into
	// spark-defaults.conf and spark-env.sh respectively.
	SparkConfigs map[string]any    `yaml:"spark_config,omitempty" validate:"spark_config:ignore"`
	SparkEnvs    map[string]string `yaml:"spark_env,omitempty" validate:"spark_env:ignore"`
	Arch         string            `yaml:"arch,omitempty"`
	OS           string            `yaml:"os,omitempty"`
}

// Role returns the component role of the instance
func (s *TiSparkMasterSpec) Role() string {
	return RoleTiSparkMaster
}

// SSH returns the host and SSH port of the instance.
// ManageHost, when set, takes precedence over Host for SSH access.
func (s *TiSparkMasterSpec) SSH() (string, int) {
	host := s.Host
	if s.ManageHost != "" {
		host = s.ManageHost
	}
	return host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *TiSparkMasterSpec) GetMainPort() int {
	return s.Port
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *TiSparkMasterSpec) IsImported() bool {
	return s.Imported
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *TiSparkMasterSpec) IgnoreMonitorAgent() bool {
	return s.IgnoreExporter
}

// TiSparkWorkerSpec is the topology specification for TiSpark slave nodes
type TiSparkWorkerSpec struct {
	Host string `yaml:"host"`
	// NOTE(review): unlike TiSparkMasterSpec, this tag has no
	// `validate:"manage_host:editable"` directive — confirm whether that is intentional.
	ManageHost     string `yaml:"manage_host,omitempty"`
	ListenHost     string `yaml:"listen_host,omitempty"`
	SSHPort        int    `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	Imported       bool   `yaml:"imported,omitempty"`
	Patched        bool   `yaml:"patched,omitempty"`
	IgnoreExporter bool   `yaml:"ignore_exporter,omitempty"`
	Port           int    `yaml:"port" default:"7078"`
	WebPort        int    `yaml:"web_port" default:"8081"`
	DeployDir      string `yaml:"deploy_dir,omitempty"`
	JavaHome       string `yaml:"java_home,omitempty" validate:"java_home:editable"`
	Arch           string `yaml:"arch,omitempty"`
	OS             string `yaml:"os,omitempty"`
}

// Role returns the component role of the instance
func (s *TiSparkWorkerSpec) Role() string {
	return RoleTiSparkWorker
}

// SSH returns the host and SSH port of the instance.
// ManageHost, when set, takes precedence over Host for SSH access.
func (s *TiSparkWorkerSpec) SSH() (string, int) {
	host := s.Host
	if s.ManageHost != "" {
		host = s.ManageHost
	}
	return host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *TiSparkWorkerSpec) GetMainPort() int {
	return s.Port
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *TiSparkWorkerSpec) IsImported() bool {
	return s.Imported
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *TiSparkWorkerSpec) IgnoreMonitorAgent() bool {
	return s.IgnoreExporter
}

// TiSparkMasterComponent represents TiSpark master component.
type TiSparkMasterComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *TiSparkMasterComponent) Name() string {
	return ComponentTiSpark
}

// Role implements Component interface.
func (c *TiSparkMasterComponent) Role() string {
	return RoleTiSparkMaster
}

// Source implements Component interface.
func (c *TiSparkMasterComponent) Source() string {
	return ComponentTiSpark
}

// CalculateVersion implements the Component interface.
// TiSpark version is not tied to the cluster version, so this is empty.
func (c *TiSparkMasterComponent) CalculateVersion(clusterVersion string) string {
	return ""
}

// SetVersion implements Component interface.
func (c *TiSparkMasterComponent) SetVersion(version string) {
	// should never be called
}

// Instances implements Component interface.
// One TiSparkMasterInstance is built per master in the topology; status is
// probed via the Spark web UI port, and uptime is not tracked (always 0).
func (c *TiSparkMasterComponent) Instances() []Instance {
	ins := make([]Instance, 0, len(c.Topology.TiSparkMasters))
	for _, s := range c.Topology.TiSparkMasters {
		ins = append(ins, &TiSparkMasterInstance{
			BaseInstance: BaseInstance{
				InstanceSpec: s,
				Name:         c.Name(),
				ManageHost:   s.ManageHost,
				Host:         s.Host,
				Port:         s.Port,
				SSHP:         s.SSHPort,
				NumaNode:     "",
				NumaCores:    "",
				Ports: []int{
					s.Port,
					s.WebPort,
				},
				Dirs: []string{
					s.DeployDir,
				},
				StatusFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string {
					return statusByHost(s.Host, s.WebPort, "", timeout, tlsCfg)
				},
				UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
					return 0
				},
				Component: c,
			},
			topo: c.Topology,
		})
	}
	return ins
}

// TiSparkMasterInstance represent the TiSpark master instance
type TiSparkMasterInstance struct {
	BaseInstance
	topo Topology
}

// GetCustomFields get custom spark configs of the instance
func (i *TiSparkMasterInstance) GetCustomFields() map[string]any {
	// InstanceSpec is accessed reflectively since it is stored as `any`.
	v := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("SparkConfigs")
	if !v.IsValid() {
		return nil
	}
	return v.Interface().(map[string]any)
}

// GetCustomEnvs get custom spark environment variables of the instance
func (i *TiSparkMasterInstance) GetCustomEnvs() map[string]string {
	v := reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("SparkEnvs")
	if !v.IsValid() {
		return nil
	}
	return v.Interface().(map[string]string)
}

// GetJavaHome returns the java_home value in spec
func (i *TiSparkMasterInstance) GetJavaHome() string {
	return reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("JavaHome").String()
}

// InitConfig implement Instance interface.
// Generates a systemd unit that wraps Spark's own start/stop scripts, then
// uploads spark-defaults.conf, spark-env.sh and log4j.properties into the
// deploy dir. The unit-file copy is guarded by a checkpoint so a resumed
// operation does not redo it.
func (i *TiSparkMasterInstance) InitConfig(
	ctx context.Context,
	e ctxt.Executor,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) (err error) {
	// generate systemd service to invoke spark's start/stop scripts
	comp := i.Role()
	host := i.GetHost()
	port := i.GetPort()
	topo := i.topo.(*Specification)

	sysCfg := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, host, port))

	// insert checkpoint
	point := checkpoint.Acquire(ctx, CopyConfigFile, map[string]any{"config-file": sysCfg})
	defer func() {
		point.Release(err, zap.String("config-file", sysCfg))
	}()
	if point.Hit() != nil {
		return nil
	}

	systemCfg := system.NewTiSparkConfig(comp, deployUser, paths.Deploy, i.GetJavaHome())
	if err := systemCfg.ConfigToFile(sysCfg); err != nil {
		return errors.Trace(err)
	}
	// Upload to a unique temp path first, then move into the systemd dir,
	// since the final move may need sudo.
	tgt := filepath.Join("/tmp", comp+"_"+uuid.New().String()+".service")
	if err := e.Transfer(ctx, sysCfg, tgt, false, 0, false); err != nil {
		return errors.Annotatef(err, "transfer from %s to %s failed", sysCfg, tgt)
	}
	systemdDir := "/etc/systemd/system/"
	sudo := true
	if i.topo.BaseTopo().GlobalOptions.SystemdMode == UserMode {
		systemdDir = "~/.config/systemd/user/"
		sudo = false
	}
	cmd := fmt.Sprintf("mv %s %s%s-%d.service", tgt, systemdDir, comp, port)
	if _, _, err := e.Execute(ctx, cmd, sudo); err != nil {
		return errors.Annotatef(err, "execute: %s", cmd)
	}

	// restorecon restores SELinux Contexts
	// Check with: ls -lZ /path/to/file
	// If the context is wrong systemctl will complain about a missing unit file
	// Note that we won't check for errors here because:
	// - We don't support SELinux in Enforcing mode
	// - restorecon might not be available (Ubuntu doesn't install SELinux tools by default)
	cmd = fmt.Sprintf("restorecon %s%s-%d.service", systemdDir, comp, port)
	e.Execute(ctx, cmd, sudo) //nolint

	// transfer default config
	pdList := topo.GetPDList()
	masterList := make([]string, 0)
	for _, master := range topo.TiSparkMasters {
		masterList = append(masterList, utils.JoinHostPort(master.Host, master.Port))
	}

	cfg := config.NewTiSparkConfig(pdList).WithMasters(strings.Join(masterList, ",")).
		WithCustomFields(i.GetCustomFields())
	// transfer spark-defaults.conf
	fp := filepath.Join(paths.Cache, fmt.Sprintf("spark-defaults-%s-%d.conf", host, port))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}
	dst := filepath.Join(paths.Deploy, "conf", "spark-defaults.conf")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}

	env := scripts.NewTiSparkEnv(host).
		WithLocalIP(i.GetListenHost()).
		WithMaster(host).
		WithMasterPorts(i.Ports[0], i.Ports[1]).
		WithCustomEnv(i.GetCustomEnvs())
	// transfer spark-env.sh file
	fp = filepath.Join(paths.Cache, fmt.Sprintf("spark-env-%s-%d.sh", host, port))
	if err := env.ScriptToFile(fp); err != nil {
		return err
	}
	// tispark files are all in a "spark" sub-directory of deploy dir
	dst = filepath.Join(paths.Deploy, "conf", "spark-env.sh")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}

	// transfer log4j config (it's not a template but a static file)
	fp = filepath.Join(paths.Cache, fmt.Sprintf("spark-log4j-%s-%d.properties", host, port))
	log4jFile, err := config.GetConfig("spark-log4j.properties.tpl")
	if err != nil {
		return err
	}
	if err := utils.WriteFile(fp, log4jFile, 0644); err != nil {
		return err
	}
	dst = filepath.Join(paths.Deploy, "conf", "log4j.properties")
	return e.Transfer(ctx, fp, dst, false, 0, false)
}

// ScaleConfig deploy temporary config on scaling.
// The instance topology is temporarily replaced by the merged cluster
// topology while InitConfig runs, then restored by the deferred assignment.
func (i *TiSparkMasterInstance) ScaleConfig(
	ctx context.Context,
	e ctxt.Executor,
	topo Topology,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	s := i.topo
	defer func() { i.topo = s }()
	cluster := mustBeClusterTopo(topo)
	i.topo = cluster.Merge(i.topo)
	return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths)
}

// TiSparkWorkerComponent represents TiSpark slave component.
type TiSparkWorkerComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *TiSparkWorkerComponent) Name() string {
	return ComponentTiSpark
}

// Role implements Component interface.
func (c *TiSparkWorkerComponent) Role() string {
	return RoleTiSparkWorker
}

// Source implements Component interface.
func (c *TiSparkWorkerComponent) Source() string {
	return ComponentTiSpark
}

// CalculateVersion implements the Component interface.
// TiSpark version is not tied to the cluster version, so this is empty.
func (c *TiSparkWorkerComponent) CalculateVersion(clusterVersion string) string {
	return ""
}

// SetVersion implements Component interface.
func (c *TiSparkWorkerComponent) SetVersion(version string) { // should never be called } // Instances implements Component interface. func (c *TiSparkWorkerComponent) Instances() []Instance { ins := make([]Instance, 0, len(c.Topology.TiSparkWorkers)) for _, s := range c.Topology.TiSparkWorkers { ins = append(ins, &TiSparkWorkerInstance{ BaseInstance: BaseInstance{ InstanceSpec: s, Name: c.Name(), ManageHost: s.ManageHost, Host: s.Host, Port: s.Port, SSHP: s.SSHPort, NumaNode: "", NumaCores: "", Ports: []int{ s.Port, s.WebPort, }, Dirs: []string{ s.DeployDir, }, StatusFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config, _ ...string) string { return statusByHost(s.Host, s.WebPort, "", timeout, tlsCfg) }, UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration { return 0 }, Component: c, }, topo: c.Topology, }) } return ins } // TiSparkWorkerInstance represent the TiSpark slave instance type TiSparkWorkerInstance struct { BaseInstance topo Topology } // GetJavaHome returns the java_home value in spec func (i *TiSparkWorkerInstance) GetJavaHome() string { return reflect.Indirect(reflect.ValueOf(i.InstanceSpec)).FieldByName("JavaHome").String() } // InitConfig implement Instance interface func (i *TiSparkWorkerInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) (err error) { // generate systemd service to invoke spark's start/stop scripts comp := i.Role() host := i.GetHost() port := i.GetPort() topo := i.topo.(*Specification) sysCfg := filepath.Join(paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, host, port)) // insert checkpoint point := checkpoint.Acquire(ctx, CopyConfigFile, map[string]any{"config-file": sysCfg}) defer func() { point.Release(err, zap.String("config-file", sysCfg)) }() if point.Hit() != nil { return nil } systemCfg := system.NewTiSparkConfig(comp, deployUser, paths.Deploy, i.GetJavaHome()) if err := 
systemCfg.ConfigToFile(sysCfg); err != nil { return errors.Trace(err) } tgt := filepath.Join("/tmp", comp+"_"+uuid.New().String()+".service") if err := e.Transfer(ctx, sysCfg, tgt, false, 0, false); err != nil { return errors.Annotatef(err, "transfer from %s to %s failed", sysCfg, tgt) } systemdDir := "/etc/systemd/system/" sudo := true if i.topo.BaseTopo().GlobalOptions.SystemdMode == UserMode { systemdDir = "~/.config/systemd/user/" sudo = false } cmd := fmt.Sprintf("mv %s %s%s-%d.service", tgt, systemdDir, comp, port) if _, _, err := e.Execute(ctx, cmd, sudo); err != nil { return errors.Annotatef(err, "execute: %s", cmd) } // transfer default config pdList := topo.GetPDList() masterList := make([]string, 0) for _, master := range topo.TiSparkMasters { masterList = append(masterList, utils.JoinHostPort(master.Host, master.Port)) } cfg := config.NewTiSparkConfig(pdList).WithMasters(strings.Join(masterList, ",")). WithCustomFields(topo.TiSparkMasters[0].SparkConfigs) // doesn't work if _, err := i.setTLSConfig(ctx, false, nil, paths); err != nil { return err } // transfer spark-defaults.conf fp := filepath.Join(paths.Cache, fmt.Sprintf("spark-defaults-%s-%d.conf", host, port)) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(paths.Deploy, "conf", "spark-defaults.conf") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } env := scripts.NewTiSparkEnv(host). WithLocalIP(i.GetListenHost()). WithMaster(topo.TiSparkMasters[0].Host). WithMasterPorts(topo.TiSparkMasters[0].Port, topo.TiSparkMasters[0].WebPort). WithWorkerPorts(i.Ports[0], i.Ports[1]). 
WithCustomEnv(topo.TiSparkMasters[0].SparkEnvs) // transfer spark-env.sh file fp = filepath.Join(paths.Cache, fmt.Sprintf("spark-env-%s-%d.sh", host, port)) if err := env.ScriptToFile(fp); err != nil { return err } // tispark files are all in a "spark" sub-directory of deploy dir dst = filepath.Join(paths.Deploy, "conf", "spark-env.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } // transfer start-slave.sh fp = filepath.Join(paths.Cache, fmt.Sprintf("start-tispark-slave-%s-%d.sh", host, port)) slaveSh, err := env.SlaveScriptWithTemplate() if err != nil { return err } if err := utils.WriteFile(fp, slaveSh, 0755); err != nil { return err } dst = filepath.Join(paths.Deploy, "sbin", "start-slave.sh") if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } // transfer log4j config (it's not a template but a static file) fp = filepath.Join(paths.Cache, fmt.Sprintf("spark-log4j-%s-%d.properties", host, port)) log4jFile, err := config.GetConfig("spark-log4j.properties.tpl") if err != nil { return err } if err := utils.WriteFile(fp, log4jFile, 0644); err != nil { return err } dst = filepath.Join(paths.Deploy, "conf", "log4j.properties") return e.Transfer(ctx, fp, dst, false, 0, false) } // setTLSConfig set TLS Config to support enable/disable TLS // TiSparkWorkerInstance no need to configure TLS func (i *TiSparkWorkerInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) { return nil, nil } // ScaleConfig deploy temporary config on scaling func (i *TiSparkWorkerInstance) ScaleConfig( ctx context.Context, e ctxt.Executor, topo Topology, clusterName, clusterVersion, deployUser string, paths meta.DirPaths, ) error { s := i.topo defer func() { i.topo = s }() i.topo = topo.Merge(i.topo) return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths) } 
tiup-1.16.3/pkg/cluster/spec/tso.go000066400000000000000000000240541505422223000171130ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"context"
	"crypto/tls"
	"fmt"
	"path/filepath"
	"strings"
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/cluster/api"
	"github.com/pingcap/tiup/pkg/cluster/ctxt"
	"github.com/pingcap/tiup/pkg/cluster/template/scripts"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/tidbver"
	"github.com/pingcap/tiup/pkg/utils"
)

// tsoService is the PD microservice name used when querying the service primary.
var tsoService = "tso"

// TSOSpec represents the TSO topology specification in topology.yaml
type TSOSpec struct {
	Host                string `yaml:"host"`
	ManageHost          string `yaml:"manage_host,omitempty" validate:"manage_host:editable"`
	ListenHost          string `yaml:"listen_host,omitempty"`
	AdvertiseListenAddr string `yaml:"advertise_listen_addr,omitempty"`
	SSHPort             int    `yaml:"ssh_port,omitempty" validate:"ssh_port:editable"`
	IgnoreExporter      bool   `yaml:"ignore_exporter,omitempty"`

	// Use Name to get the name with a default value if it's empty.
	Name string `yaml:"name,omitempty"`

	Port      int            `yaml:"port" default:"3379"`
	DeployDir string         `yaml:"deploy_dir,omitempty"`
	DataDir   string         `yaml:"data_dir,omitempty"`
	LogDir    string         `yaml:"log_dir,omitempty"`
	Source    string         `yaml:"source,omitempty" validate:"source:editable"`
	NumaNode  string         `yaml:"numa_node,omitempty" validate:"numa_node:editable"`
	Config    map[string]any `yaml:"config,omitempty" validate:"config:ignore"`
	Arch      string         `yaml:"arch,omitempty"`
	OS        string         `yaml:"os,omitempty"`
}

// Status queries current status of the instance.
// Returns "Down" if the TSO health check fails, "ERR" if the PD service
// primary cannot be queried, "Up" otherwise, with "|P" appended when this
// instance's advertise URL is the current TSO primary.
func (s *TSOSpec) Status(ctx context.Context, timeout time.Duration, tlsCfg *tls.Config, pdList ...string) string {
	if timeout < time.Second {
		timeout = statusQueryTimeout
	}

	addr := utils.JoinHostPort(s.GetManageHost(), s.Port)
	tc := api.NewTSOClient(ctx, []string{addr}, timeout, tlsCfg)
	pc := api.NewPDClient(ctx, pdList, timeout, tlsCfg)

	// check health
	err := tc.CheckHealth()
	if err != nil {
		return "Down"
	}

	primary, err := pc.GetServicePrimary(tsoService)
	if err != nil {
		return "ERR"
	}
	res := "Up"
	enableTLS := false
	if tlsCfg != nil {
		enableTLS = true
	}
	if s.GetAdvertiseListenURL(enableTLS) == primary {
		res += "|P"
	}

	return res
}

// Role returns the component role of the instance
func (s *TSOSpec) Role() string {
	return ComponentTSO
}

// SSH returns the host and SSH port of the instance.
// ManageHost, when set, takes precedence over Host for SSH access.
func (s *TSOSpec) SSH() (string, int) {
	host := s.Host
	if s.ManageHost != "" {
		host = s.ManageHost
	}
	return host, s.SSHPort
}

// GetMainPort returns the main port of the instance
func (s *TSOSpec) GetMainPort() int {
	return s.Port
}

// GetManageHost returns the manage host of the instance
func (s *TSOSpec) GetManageHost() string {
	if s.ManageHost != "" {
		return s.ManageHost
	}
	return s.Host
}

// IsImported returns if the node is imported from TiDB-Ansible
func (s *TSOSpec) IsImported() bool {
	return false
}

// IgnoreMonitorAgent returns if the node does not have monitor agents available
func (s *TSOSpec) IgnoreMonitorAgent() bool {
	return s.IgnoreExporter
}

// GetAdvertiseListenURL returns AdvertiseListenURL.
// Falls back to http(s)://host:port when no explicit advertise address is set.
func (s *TSOSpec) GetAdvertiseListenURL(enableTLS bool) string {
	if s.AdvertiseListenAddr != "" {
		return s.AdvertiseListenAddr
	}
	scheme := utils.Ternary(enableTLS, "https", "http").(string)
	return fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(s.Host, s.Port))
}

// TSOComponent represents TSO component.
type TSOComponent struct{ Topology *Specification }

// Name implements Component interface.
func (c *TSOComponent) Name() string {
	return ComponentTSO
}

// Role implements Component interface.
func (c *TSOComponent) Role() string {
	return ComponentTSO
}

// Source implements Component interface.
// TSO binaries come from the PD component (or its configured source override).
func (c *TSOComponent) Source() string {
	source := c.Topology.ComponentSources.PD
	if source != "" {
		return source
	}
	return ComponentPD
}

// CalculateVersion implements the Component interface.
// The TSO-specific version override wins; otherwise the cluster version is used.
func (c *TSOComponent) CalculateVersion(clusterVersion string) string {
	version := c.Topology.ComponentVersions.TSO
	if version == "" {
		version = clusterVersion
	}
	return version
}

// SetVersion implements Component interface.
func (c *TSOComponent) SetVersion(version string) {
	c.Topology.ComponentVersions.TSO = version
}

// Instances implements Component interface.
func (c *TSOComponent) Instances() []Instance {
	ins := make([]Instance, 0, len(c.Topology.TSOServers))
	for _, s := range c.Topology.TSOServers {
		ins = append(ins, &TSOInstance{
			BaseInstance: BaseInstance{
				InstanceSpec: s,
				Name:         c.Name(),
				Host:         s.Host,
				ManageHost:   s.ManageHost,
				ListenHost:   utils.Ternary(s.ListenHost != "", s.ListenHost, c.Topology.BaseTopo().GlobalOptions.ListenHost).(string),
				Port:         s.Port,
				SSHP:         s.SSHPort,
				Source:       s.Source,
				NumaNode:     s.NumaNode,
				NumaCores:    "",
				Ports: []int{
					s.Port,
				},
				Dirs: []string{
					s.DeployDir,
					s.DataDir,
				},
				StatusFn: s.Status,
				UptimeFn: func(_ context.Context, timeout time.Duration, tlsCfg *tls.Config) time.Duration {
					return UptimeByHost(s.GetManageHost(), s.Port, timeout, tlsCfg)
				},
				Component: c,
			},
			topo: c.Topology,
		})
	}
	return ins
}

// TSOInstance represent the TSO instance
type TSOInstance struct {
	BaseInstance
	topo Topology
}

// InitConfig implement Instance interface.
// Renders and uploads the run_tso.sh start script (PD endpoints as backend),
// marks it executable, applies TLS config keys, merges global and instance
// configs, and finally validates the generated tso.toml against the binary.
func (i *TSOInstance) InitConfig(
	ctx context.Context,
	e ctxt.Executor,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	topo := i.topo.(*Specification)
	if err := i.BaseInstance.InitConfig(ctx, e, topo.GlobalOptions, deployUser, paths); err != nil {
		return err
	}

	enableTLS := topo.GlobalOptions.TLSEnabled
	spec := i.InstanceSpec.(*TSOSpec)
	scheme := utils.Ternary(enableTLS, "https", "http").(string)
	version := i.CalculateVersion(clusterVersion)

	pds := []string{}
	for _, pdspec := range topo.PDServers {
		pds = append(pds, pdspec.GetAdvertiseClientURL(enableTLS))
	}
	cfg := &scripts.TSOScript{
		Name:               spec.Name,
		ListenURL:          fmt.Sprintf("%s://%s", scheme, utils.JoinHostPort(i.GetListenHost(), spec.Port)),
		AdvertiseListenURL: spec.GetAdvertiseListenURL(enableTLS),
		BackendEndpoints:   strings.Join(pds, ","),
		DeployDir:          paths.Deploy,
		DataDir:            paths.Data[0],
		LogDir:             paths.Log,
		NumaNode:           spec.NumaNode,
	}
	// Older PD builds don't accept a named microservice instance.
	if !tidbver.PDSupportMicroservicesWithName(version) {
		cfg.Name = ""
	}

	fp := filepath.Join(paths.Cache, fmt.Sprintf("run_tso_%s_%d.sh", i.GetHost(), i.GetPort()))
	if err := cfg.ConfigToFile(fp); err != nil {
		return err
	}
	dst := filepath.Join(paths.Deploy, "scripts", "run_tso.sh")
	if err := e.Transfer(ctx, fp, dst, false, 0, false); err != nil {
		return err
	}
	_, _, err := e.Execute(ctx, "chmod +x "+dst, false)
	if err != nil {
		return err
	}

	globalConfig := topo.ServerConfigs.TSO

	// set TLS configs
	spec.Config, err = i.setTLSConfig(ctx, enableTLS, spec.Config, paths)
	if err != nil {
		return err
	}

	if err := i.MergeServerConfig(ctx, e, globalConfig, spec.Config, paths); err != nil {
		return err
	}

	return checkConfig(ctx, e, i.ComponentName(), i.ComponentSource(), version, i.OS(), i.Arch(), i.ComponentName()+".toml", paths)
}

// setTLSConfig set TLS Config to support enable/disable TLS.
// When enableTLS is true it fills in the CA/cert/key paths under the deploy
// dir; otherwise it strips those keys so a previously enabled config is
// cleanly reverted. Returns the updated map.
func (i *TSOInstance) setTLSConfig(ctx context.Context, enableTLS bool, configs map[string]any, paths meta.DirPaths) (map[string]any, error) {
	// set TLS configs
	if enableTLS {
		if configs == nil {
			configs = make(map[string]any)
		}
		configs["security.cacert-path"] = fmt.Sprintf(
			"%s/tls/%s",
			paths.Deploy,
			TLSCACert,
		)
		configs["security.cert-path"] = fmt.Sprintf(
			"%s/tls/%s.crt",
			paths.Deploy,
			i.Role())
		configs["security.key-path"] = fmt.Sprintf(
			"%s/tls/%s.pem",
			paths.Deploy,
			i.Role())
	} else {
		// TSO TLS config key list
		tlsConfigs := []string{
			"security.cacert-path",
			"security.cert-path",
			"security.key-path",
		}
		// delete TLS configs
		if configs != nil {
			for _, config := range tlsConfigs {
				delete(configs, config)
			}
		}
	}

	return configs, nil
}

// IsPrimary checks if the instance is primary.
// Queries PD for the current TSO service primary and compares it against
// this instance's advertise URL.
func (i *TSOInstance) IsPrimary(ctx context.Context, topo Topology, tlsCfg *tls.Config) (bool, error) {
	tidbTopo, ok := topo.(*Specification)
	if !ok {
		panic("topo should be type of tidb topology")
	}
	pdClient := api.NewPDClient(ctx, tidbTopo.GetPDListWithManageHost(), time.Second*5, tlsCfg)
	primary, err := pdClient.GetServicePrimary(tsoService)
	if err != nil {
		return false, errors.Annotatef(err, "failed to get TSO primary %s", i.GetHost())
	}
	spec := i.InstanceSpec.(*TSOSpec)
	enableTLS := false
	if tlsCfg != nil {
		enableTLS = true
	}
	return primary == spec.GetAdvertiseListenURL(enableTLS), nil
}

// ScaleConfig deploy temporary config on scaling.
// The instance topology is temporarily replaced by the given cluster
// topology while InitConfig runs, then restored by the deferred assignment.
func (i *TSOInstance) ScaleConfig(
	ctx context.Context,
	e ctxt.Executor,
	topo Topology,
	clusterName,
	clusterVersion,
	deployUser string,
	paths meta.DirPaths,
) error {
	s := i.topo
	defer func() {
		i.topo = s
	}()
	i.topo = mustBeClusterTopo(topo)
	return i.InitConfig(ctx, e, clusterName, clusterVersion, deployUser, paths)
}

var _ RollingUpdateInstance = &TSOInstance{}

// PreRestart implements RollingUpdateInstance interface.
// TSO needs no preparation before a rolling restart.
func (i *TSOInstance) PreRestart(ctx context.Context, topo Topology, apiTimeoutSeconds int, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	return nil
}

// PostRestart implements RollingUpdateInstance interface.
// TSO needs no cleanup after a rolling restart.
func (i *TSOInstance) PostRestart(ctx context.Context, topo Topology, tlsCfg *tls.Config, updcfg *UpdateConfig) error {
	return nil
}
tiup-1.16.3/pkg/cluster/spec/util.go000066400000000000000000000144451505422223000172660ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package spec

import (
	"bytes"
	"context"
	"crypto/tls"
	"fmt"
	"path/filepath"
	"reflect"
	"strings"
	"time"

	"github.com/pingcap/tiup/pkg/utils"
	"github.com/pingcap/tiup/pkg/version"
	"github.com/prometheus/common/expfmt"
	"go.etcd.io/etcd/client/pkg/v3/transport"
)

var tidbSpec *SpecManager

// GetSpecManager return the spec manager of tidb cluster.
func GetSpecManager() *SpecManager {
	// tidbSpec is populated during profile initialization; calling this
	// earlier is a programming error.
	if !initialized {
		panic("must Initialize profile first")
	}
	return tidbSpec
}

// ClusterMeta is the specification of generic cluster metadata
type ClusterMeta struct {
	User    string `yaml:"user"`         // the user to run and manage cluster on remote
	Version string `yaml:"tidb_version"` // the version of TiDB cluster
	// EnableFirewall bool   `yaml:"firewall"`
	OpsVer string `yaml:"last_ops_ver,omitempty"` // the version of ourself that updated the meta last time

	Topology *Specification `yaml:"topology"`
}

var _ UpgradableMetadata = &ClusterMeta{}

// SetVersion implement UpgradableMetadata interface.
func (m *ClusterMeta) SetVersion(s string) {
	m.Version = s
}

// SetUser implement UpgradableMetadata interface.
func (m *ClusterMeta) SetUser(s string) {
	m.User = s
}

// GetTopology implement Metadata interface.
func (m *ClusterMeta) GetTopology() Topology {
	return m.Topology
}

// SetTopology implement Metadata interface.
// Panics if topo is not a *Specification, since ClusterMeta can only hold
// a tidb cluster topology.
func (m *ClusterMeta) SetTopology(topo Topology) {
	tidbTopo, ok := topo.(*Specification)
	if !ok {
		panic(fmt.Sprintln("wrong type: ", reflect.TypeOf(topo)))
	}

	m.Topology = tidbTopo
}

// GetBaseMeta implements Metadata interface.
func (m *ClusterMeta) GetBaseMeta() *BaseMeta {
	return &BaseMeta{
		Version: m.Version,
		User:    m.User,
		OpsVer:  &m.OpsVer,
	}
}

// AuditDir return the directory for saving audit log.
func AuditDir() string {
	return filepath.Join(profileDir, TiUPAuditDir)
}

// SaveClusterMeta saves the cluster meta information to profile directory
func SaveClusterMeta(clusterName string, cmeta *ClusterMeta) error {
	// set the cmd version
	cmeta.OpsVer = version.NewTiUPVersion().String()
	return GetSpecManager().SaveMeta(clusterName, cmeta)
}

// ClusterMetadata tries to read the metadata of a cluster from file
func ClusterMetadata(clusterName string) (*ClusterMeta, error) {
	var cm ClusterMeta
	err := GetSpecManager().Metadata(clusterName, &cm)
	if err != nil {
		// Return the value of cm even on error, to make sure the caller can get the data
		// we read, if there's any.
		// This is necessary when, either by manual editing of meta.yaml file, by not fully
		// validated `edit-config`, or by some unexpected operations from a broken legacy
		// release, we could provide max possibility that operations like `display`, `scale`
		// and `destroy` are still (more or less) working, by ignoring certain errors.
		return &cm, err
	}

	return &cm, nil
}

// LoadClientCert read and load the client cert key pair and CA cert
func LoadClientCert(dir string) (*tls.Config, error) {
	return transport.TLSInfo{
		TrustedCAFile: filepath.Join(dir, TLSCACert),
		CertFile:      filepath.Join(dir, TLSClientCert),
		KeyFile:       filepath.Join(dir, TLSClientKey),
	}.ClientConfig()
}

// statusByHost queries current status of the instance by http status api.
func statusByHost(host string, port int, path string, timeout time.Duration, tlsCfg *tls.Config) string { if timeout < time.Second { timeout = statusQueryTimeout } client := utils.NewHTTPClient(timeout, tlsCfg) scheme := "http" if tlsCfg != nil { scheme = "https" } if path == "" { path = "/" } url := fmt.Sprintf("%s://%s%s", scheme, utils.JoinHostPort(host, port), path) // body doesn't have any status section needed body, err := client.Get(context.TODO(), url) if err != nil || body == nil { return "Down" } return "Up" } // UptimeByHost queries current uptime of the instance by http Prometheus metric api. func UptimeByHost(host string, port int, timeout time.Duration, tlsCfg *tls.Config) time.Duration { if timeout < time.Second { timeout = statusQueryTimeout } scheme := "http" if tlsCfg != nil { scheme = "https" } url := fmt.Sprintf("%s://%s/metrics", scheme, utils.JoinHostPort(host, port)) client := utils.NewHTTPClient(timeout, tlsCfg) body, err := client.Get(context.TODO(), url) if err != nil || body == nil { return 0 } var parser expfmt.TextParser reader := bytes.NewReader(body) mf, err := parser.TextToMetricFamilies(reader) if err != nil { return 0 } now := time.Now() for k, v := range mf { if k == promMetricStartTimeSeconds { ms := v.GetMetric() if len(ms) >= 1 { startTime := ms[0].Gauge.GetValue() return now.Sub(time.Unix(int64(startTime), 0)) } return 0 } } return 0 } // Abs returns the absolute path func Abs(user, path string) string { // trim whitespaces before joining user = strings.TrimSpace(user) path = strings.TrimSpace(path) if !strings.HasPrefix(path, "/") { path = filepath.Join("/home", user, path) } return filepath.Clean(path) } // MultiDirAbs returns the absolute path for multi-dir separated by comma func MultiDirAbs(user, paths string) []string { var dirs []string for path := range strings.SplitSeq(paths, ",") { path = strings.TrimSpace(path) if path == "" { continue } dirs = append(dirs, Abs(user, path)) } return dirs } // PackagePath return the 
tar bar path func PackagePath(comp string, version string, os string, arch string) string { fileName := fmt.Sprintf("%s-%s-%s-%s.tar.gz", comp, version, os, arch) return ProfilePath(TiUPPackageCacheDir, fileName) } // GetDMMasterPackageName return package name of the first DMMaster instance func GetDMMasterPackageName(topo Topology) string { for _, c := range topo.ComponentsByStartOrder() { if c.Name() == ComponentDMMaster { instances := c.Instances() if len(instances) > 0 { return instances[0].ComponentSource() } } } return ComponentDMMaster } tiup-1.16.3/pkg/cluster/spec/util_test.go000066400000000000000000000032721505422223000203210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package spec

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestAbs verifies that Abs resolves relative paths under /home/<user>,
// keeps absolute paths untouched, and trims surrounding whitespace from
// both the user and the path.
func TestAbs(t *testing.T) {
	cases := []struct {
		user, path, want string
	}{
		{" foo", "", "/home/foo"},
		{"foo ", " ", "/home/foo"},
		{"foo", "bar", "/home/foo/bar"},
		{"foo", " bar", "/home/foo/bar"},
		{"foo", "bar ", "/home/foo/bar"},
		{"foo", " bar ", "/home/foo/bar"},
		{"foo", "/bar", "/bar"},
		{"foo", " /bar", "/bar"},
		{"foo", "/bar ", "/bar"},
		{"foo", " /bar ", "/bar"},
	}
	for _, tc := range cases {
		require.Equal(t, tc.want, Abs(tc.user, tc.path))
	}
}

// TestMultiDirAbs verifies that MultiDirAbs splits a comma separated list,
// drops blank entries, and resolves every remaining entry with Abs.
func TestMultiDirAbs(t *testing.T) {
	cases := []struct {
		paths string
		want  []string
	}{
		{"", nil},
		{" ", nil},
		{"a ", []string{"/home/tidb/a"}},
		{"a , /tmp/b", []string{"/home/tidb/a", "/tmp/b"}},
	}
	for _, tc := range cases {
		got := MultiDirAbs("tidb", tc.paths)
		require.Equal(t, len(tc.want), len(got))
		for i := range tc.want {
			require.Equal(t, tc.want[i], got[i])
		}
	}
}
tiup-1.16.3/pkg/cluster/spec/validate.go000066400000000000000000001002141505422223000200700ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package spec

import (
	"fmt"
	"path/filepath"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"

	"github.com/pingcap/tiup/pkg/cluster/api"

	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/meta"
	"github.com/pingcap/tiup/pkg/set"
	"github.com/pingcap/tiup/pkg/tui"
	"github.com/pingcap/tiup/pkg/utils"
	"go.uber.org/zap"
)

// pre defined error types
var (
	errNSDeploy           = errNS.NewSubNamespace("deploy")
	errDeployDirConflict  = errNSDeploy.NewType("dir_conflict", utils.ErrTraitPreCheck)
	errDeployDirOverlap   = errNSDeploy.NewType("dir_overlap", utils.ErrTraitPreCheck)
	errDeployPortConflict = errNSDeploy.NewType("port_conflict", utils.ErrTraitPreCheck)

	// ErrNoTiSparkMaster is returned when TiSpark workers are configured
	// without any TiSpark master.
	ErrNoTiSparkMaster = errors.New("there must be a Spark master node if you want to use the TiSpark component")
	// ErrMultipleTiSparkMaster is returned when more than one TiSpark master
	// is configured.
	ErrMultipleTiSparkMaster = errors.New("a TiSpark enabled cluster with more than 1 Spark master node is not supported")
	// ErrMultipleTisparkWorker is returned when multiple TiSpark workers are
	// placed on the same host.
	ErrMultipleTisparkWorker = errors.New("multiple TiSpark workers on the same host is not supported by Spark")
	// ErrUserOrGroupInvalid is returned when the global user or group name
	// does not match the Linux naming rules checked by reUser / reGroup.
	ErrUserOrGroupInvalid = errors.New(`linux username and groupname must start with a lower case letter or an underscore, ` +
		`followed by lower case letters, digits, underscores, or dashes. ` +
		`Usernames may only be up to 32 characters long. ` +
		`Groupnames may only be up to 16 characters long.`)
)

// Linux username and groupname must start with a lower case letter or an underscore,
// followed by lower case letters, digits, underscores, or dashes.
// ref https://man7.org/linux/man-pages/man8/useradd.8.html
// ref https://man7.org/linux/man-pages/man8/groupadd.8.html
var (
	reUser  = regexp.MustCompile(`^[a-z_]([a-z0-9_-]{0,31}|[a-z0-9_-]{0,30}\$)$`)
	reGroup = regexp.MustCompile(`^[a-z_]([a-z0-9_-]{0,15})$`)
)

// fixDir returns a closure that resolves a non-empty dir to an absolute
// path under the topology's global user home (via Abs); empty dirs are
// returned unchanged.
func fixDir(topo Topology) func(string) string {
	return func(dir string) string {
		if dir != "" {
			return Abs(topo.BaseTopo().GlobalOptions.User, dir)
		}
		return dir
	}
}

// DirAccessor stands for a directory accessor for an instance
type DirAccessor struct {
	dirKind  string                            // human readable kind, e.g. "data directory"
	accessor func(Instance, Topology) string   // extracts the dir value from an instance
}

// dirAccessors returns the accessors for per-instance directories and for
// host-level (monitoring agent) directories, respectively.
func dirAccessors() ([]DirAccessor, []DirAccessor) {
	instanceDirAccessor := []DirAccessor{
		{dirKind: "deploy directory", accessor: func(instance Instance, topo Topology) string { return instance.DeployDir() }},
		{dirKind: "data directory", accessor: func(instance Instance, topo Topology) string { return instance.DataDir() }},
		{dirKind: "log directory", accessor: func(instance Instance, topo Topology) string { return instance.LogDir() }},
	}
	hostDirAccessor := []DirAccessor{
		{dirKind: "monitor deploy directory", accessor: func(instance Instance, topo Topology) string {
			m := topo.BaseTopo().MonitoredOptions
			if m == nil {
				return ""
			}
			return m.DeployDir
		}},
		{dirKind: "monitor data directory", accessor: func(instance Instance, topo Topology) string {
			m := topo.BaseTopo().MonitoredOptions
			if m == nil {
				return ""
			}
			return m.DataDir
		}},
		{dirKind: "monitor log directory", accessor: func(instance Instance, topo Topology) string {
			m := topo.BaseTopo().MonitoredOptions
			if m == nil {
				return ""
			}
			return m.LogDir
		}},
	}

	return instanceDirAccessor, hostDirAccessor
}

// DirEntry stands for a directory with attributes and instance
type DirEntry struct {
	clusterName string
	dirKind     string
	dir         string
	instance    Instance
}

// appendEntries appends one DirEntry to targets for every comma separated
// directory produced by the accessor (resolved to absolute paths first).
func appendEntries(name string, topo Topology, inst Instance, dirAccessor DirAccessor, targets []DirEntry) []DirEntry {
	for dir := range strings.SplitSeq(fixDir(topo)(dirAccessor.accessor(inst, topo)), ",") {
		targets = append(targets, DirEntry{
			clusterName: name,
			dirKind:     dirAccessor.dirKind,
			dir:         dir,
			instance:    inst,
		})
	}

	return targets
}

// CheckClusterDirConflict checks cluster dir conflict or overlap
func CheckClusterDirConflict(clusterList map[string]Metadata, clusterName string, topo Topology) error {
	instanceDirAccessor, hostDirAccessor := dirAccessors()
	currentEntries := []DirEntry{}
	existingEntries := []DirEntry{}

	// rebuild existing disk status
	for name, metadata := range clusterList {
		// skip the cluster being checked itself
		if name == clusterName {
			continue
		}

		topo := metadata.GetTopology()

		topo.IterInstance(func(inst Instance) {
			for _, dirAccessor := range instanceDirAccessor {
				existingEntries = appendEntries(name, topo, inst, dirAccessor, existingEntries)
			}
		})
		IterHost(topo, func(inst Instance) {
			for _, dirAccessor := range hostDirAccessor {
				existingEntries = appendEntries(name, topo, inst, dirAccessor, existingEntries)
			}
		})
	}

	// collect the directories of the cluster being deployed/scaled
	topo.IterInstance(func(inst Instance) {
		for _, dirAccessor := range instanceDirAccessor {
			currentEntries = appendEntries(clusterName, topo, inst, dirAccessor, currentEntries)
		}
	})
	IterHost(topo, func(inst Instance) {
		for _, dirAccessor := range hostDirAccessor {
			currentEntries = appendEntries(clusterName, topo, inst, dirAccessor, currentEntries)
		}
	})

	for _, d1 := range currentEntries {
		// data_dir is relative to deploy_dir by default, so they can be with
		// same (sub) paths as long as the deploy_dirs are different
		if d1.dirKind == "data directory" && !strings.HasPrefix(d1.dir, "/") {
			continue
		}
		for _, d2 := range existingEntries {
			// only directories on the same host can conflict
			if d1.instance.GetManageHost() != d2.instance.GetManageHost() {
				continue
			}
			// ignore conflict in the case when both sides are monitor and either one of them
			// is marked as ignore exporter.
			if strings.HasPrefix(d1.dirKind, "monitor") &&
				strings.HasPrefix(d2.dirKind, "monitor") &&
				(d1.instance.IgnoreMonitorAgent() || d2.instance.IgnoreMonitorAgent()) {
				continue
			}

			// exact (non-empty) directory match is a hard conflict
			if d1.dir == d2.dir && d1.dir != "" {
				properties := map[string]string{
					"ThisDirKind":    d1.dirKind,
					"ThisDir":        d1.dir,
					"ThisComponent":  d1.instance.ComponentName(),
					"ThisHost":       d1.instance.GetManageHost(),
					"ExistCluster":   d2.clusterName,
					"ExistDirKind":   d2.dirKind,
					"ExistDir":       d2.dir,
					"ExistComponent": d2.instance.ComponentName(),
					"ExistHost":      d2.instance.GetManageHost(),
				}
				zap.L().Info("Meet deploy directory conflict", zap.Any("info", properties))
				return errDeployDirConflict.New("Deploy directory conflicts to an existing cluster").WithProperty(tui.SuggestionFromTemplate(`
The directory you specified in the topology file is:
  Directory: {{ColorKeyword}}{{.ThisDirKind}} {{.ThisDir}}{{ColorReset}}
  Component: {{ColorKeyword}}{{.ThisComponent}} {{.ThisHost}}{{ColorReset}}

It conflicts to a directory in the existing cluster:
  Existing Cluster Name: {{ColorKeyword}}{{.ExistCluster}}{{ColorReset}}
  Existing Directory:    {{ColorKeyword}}{{.ExistDirKind}} {{.ExistDir}}{{ColorReset}}
  Existing Component:    {{ColorKeyword}}{{.ExistComponent}} {{.ExistHost}}{{ColorReset}}

Please change to use another directory or another host.
`, properties))
			}
		}
	}

	return CheckClusterDirOverlap(currentEntries)
}

// CheckClusterDirOverlap checks cluster dir overlaps with data or log.
// this should only be used across clusters.
// we don't allow to deploy log under data, and vice versa.
// ref https://github.com/pingcap/tiup/issues/1047#issuecomment-761711508
func CheckClusterDirOverlap(entries []DirEntry) error {
	// pairs on different hosts, with empty dirs, or involving deploy
	// directories are never considered overlapping
	ignore := func(d1, d2 DirEntry) bool {
		return (d1.instance.GetManageHost() != d2.instance.GetManageHost()) ||
			d1.dir == "" || d2.dir == "" ||
			strings.HasSuffix(d1.dirKind, "deploy directory") ||
			strings.HasSuffix(d2.dirKind, "deploy directory")
	}
	// pairwise check of all entries
	for i := 0; i < len(entries)-1; i++ {
		d1 := entries[i]
		for j := i + 1; j < len(entries); j++ {
			d2 := entries[j]
			if ignore(d1, d2) {
				continue
			}

			if utils.IsSubDir(d1.dir, d2.dir) || utils.IsSubDir(d2.dir, d1.dir) {
				// overlap is allowed in the case both sides are imported
				if d1.instance.IsImported() && d2.instance.IsImported() {
					continue
				}
				// overlap is allowed in the case one side is imported and the other is monitor,
				// we assume that the monitor is deployed with the first instance in that host,
				// it implies that the monitor is imported too.
				if (strings.HasPrefix(d1.dirKind, "monitor") && d2.instance.IsImported()) ||
					(d1.instance.IsImported() && strings.HasPrefix(d2.dirKind, "monitor")) {
					continue
				}
				// overlap is allowed in the case one side is data dir of a monitor instance,
				// as the *_exporter don't need data dir, the field is only kept for compatibility
				// with legacy tidb-ansible deployments.
				if (strings.HasPrefix(d1.dirKind, "monitor data directory")) ||
					(strings.HasPrefix(d2.dirKind, "monitor data directory")) {
					continue
				}

				properties := map[string]string{
					"ThisDirKind":   d1.dirKind,
					"ThisDir":       d1.dir,
					"ThisComponent": d1.instance.ComponentName(),
					"ThisHost":      d1.instance.GetManageHost(),
					"ThatDirKind":   d2.dirKind,
					"ThatDir":       d2.dir,
					"ThatComponent": d2.instance.ComponentName(),
					"ThatHost":      d2.instance.GetManageHost(),
				}
				zap.L().Info("Meet deploy directory overlap", zap.Any("info", properties))
				return errDeployDirOverlap.New("Deploy directory overlaps to another instance").WithProperty(tui.SuggestionFromTemplate(`
The directory you specified in the topology file is:
  Directory: {{ColorKeyword}}{{.ThisDirKind}} {{.ThisDir}}{{ColorReset}}
  Component: {{ColorKeyword}}{{.ThisComponent}} {{.ThisHost}}{{ColorReset}}

It overlaps to another instance:
  Other Directory: {{ColorKeyword}}{{.ThatDirKind}} {{.ThatDir}}{{ColorReset}}
  Other Component: {{ColorKeyword}}{{.ThatComponent}} {{.ThatHost}}{{ColorReset}}

Please modify the topology file and try again.
`, properties))
			}
		}
	}

	return nil
}

// CheckClusterPortConflict checks cluster port conflict
func CheckClusterPortConflict(clusterList map[string]Metadata, clusterName string, topo Topology) error {
	type Entry struct {
		clusterName   string
		componentName string
		port          int
		instance      Instance
	}

	currentEntries := []Entry{}
	existingEntries := []Entry{}

	// collect used ports of all other clusters
	for name, metadata := range clusterList {
		if name == clusterName {
			continue
		}

		uniqueHosts := set.NewStringSet()
		metadata.GetTopology().IterInstance(func(inst Instance) {
			mOpt := metadata.GetTopology().GetMonitoredOptions()
			if mOpt == nil {
				return
			}
			nodeExporterPort := mOpt.NodeExporterPort
			blackboxExporterPort := mOpt.BlackboxExporterPort
			for _, port := range inst.UsedPorts() {
				existingEntries = append(existingEntries, Entry{
					clusterName:   name,
					componentName: inst.ComponentName(),
					port:          port,
					instance:      inst,
				})
			}
			// the monitoring agents are deployed once per host
			if !uniqueHosts.Exist(inst.GetManageHost()) {
				uniqueHosts.Insert(inst.GetManageHost())
				existingEntries = append(existingEntries,
					Entry{
						clusterName:   name,
						componentName: RoleMonitor,
						port:          nodeExporterPort,
						instance:      inst,
					},
					Entry{
						clusterName:   name,
						componentName: RoleMonitor,
						port:          blackboxExporterPort,
						instance:      inst,
					})
			}
		})
	}

	// collect used ports of the cluster being checked
	uniqueHosts := set.NewStringSet()
	topo.IterInstance(func(inst Instance) {
		for _, port := range inst.UsedPorts() {
			currentEntries = append(currentEntries, Entry{
				componentName: inst.ComponentName(),
				port:          port,
				instance:      inst,
			})
		}

		mOpt := topo.GetMonitoredOptions()
		if mOpt == nil {
			return
		}
		if !uniqueHosts.Exist(inst.GetManageHost()) {
			uniqueHosts.Insert(inst.GetManageHost())
			currentEntries = append(currentEntries,
				Entry{
					componentName: RoleMonitor,
					port:          mOpt.NodeExporterPort,
					instance:      inst,
				},
				Entry{
					componentName: RoleMonitor,
					port:          mOpt.BlackboxExporterPort,
					instance:      inst,
				})
		}
	})

	for _, p1 := range currentEntries {
		for _, p2 := range existingEntries {
			// only ports on the same host can conflict
			if p1.instance.GetManageHost() != p2.instance.GetManageHost() {
				continue
			}
			if p1.port == p2.port {
				// build the conflict info
				properties := map[string]string{
					"ThisPort":       strconv.Itoa(p1.port),
					"ThisComponent":  p1.componentName,
					"ThisHost":       p1.instance.GetManageHost(),
					"ExistCluster":   p2.clusterName,
					"ExistPort":      strconv.Itoa(p2.port),
					"ExistComponent": p2.componentName,
					"ExistHost":      p2.instance.GetManageHost(),
				}

				// if one of the instances marks itself as ignore_exporter, do not report
				// the monitoring agent ports conflict and just skip
				if (p1.componentName == RoleMonitor || p2.componentName == RoleMonitor) &&
					(p1.instance.IgnoreMonitorAgent() || p2.instance.IgnoreMonitorAgent()) {
					zap.L().Debug("Ignored deploy port conflict", zap.Any("info", properties))
					continue
				}

				// build error message
				zap.L().Info("Meet deploy port conflict", zap.Any("info", properties))
				return errDeployPortConflict.New("Deploy port conflicts to an existing cluster").WithProperty(tui.SuggestionFromTemplate(`
The port you specified in the topology file is:
  Port:      {{ColorKeyword}}{{.ThisPort}}{{ColorReset}}
  Component: {{ColorKeyword}}{{.ThisComponent}} {{.ThisHost}}{{ColorReset}}

It conflicts to a port in the existing cluster:
  Existing Cluster Name: {{ColorKeyword}}{{.ExistCluster}}{{ColorReset}}
  Existing Port:         {{ColorKeyword}}{{.ExistPort}}{{ColorReset}}
  Existing Component:    {{ColorKeyword}}{{.ExistComponent}} {{.ExistHost}}{{ColorReset}}

Please change to use another port or another host.
`, properties))
			}
		}
	}

	return nil
}

// TiKVLabelError indicates that some TiKV servers don't have correct labels
type TiKVLabelError struct {
	// TiKVInstances maps a TiKV instance id to the label errors found on it
	TiKVInstances map[string][]error
}

// Error implements error; instance ids and the errors of each instance are
// sorted so the message is deterministic.
func (e *TiKVLabelError) Error() string {
	ids := []string{}
	for id := range e.TiKVInstances {
		ids = append(ids, id)
	}
	sort.Strings(ids)

	str := ""
	for _, id := range ids {
		if len(e.TiKVInstances[id]) == 0 {
			continue
		}
		errs := []string{}
		for _, e := range e.TiKVInstances[id] {
			errs = append(errs, e.Error())
		}
		sort.Strings(errs)
		str += fmt.Sprintf("%s:\n", id)
		for _, e := range errs {
			str += fmt.Sprintf("\t%s\n", e)
		}
	}
	return str
}

// TiKVLabelProvider provides the store labels information
type TiKVLabelProvider interface {
	GetTiKVLabels() (map[string]map[string]string, []map[string]api.LabelInfo, error)
}

// getHostFromAddress strips the port from a host:port address.
func getHostFromAddress(addr string) string {
	host, _ := utils.ParseHostPort(addr)
	return host
}

// CheckTiKVLabels will check if tikv missing label or have wrong label
func CheckTiKVLabels(pdLocLabels []string, slp TiKVLabelProvider) error {
	lerr := &TiKVLabelError{
		TiKVInstances: make(map[string][]error),
	}
	lbs := set.NewStringSet(pdLocLabels...)
	hosts := make(map[string]int)

	storeLabels, _, err := slp.GetTiKVLabels()
	if err != nil {
		return err
	}
	// count stores per host to detect multi-instance hosts
	for kv := range storeLabels {
		host := getHostFromAddress(kv)
		hosts[host]++
	}
	for kv, ls := range storeLabels {
		host := getHostFromAddress(kv)
		// multiple stores on one host must carry location labels
		if len(ls) == 0 && hosts[host] > 1 {
			lerr.TiKVInstances[kv] = append(
				lerr.TiKVInstances[kv],
				errors.New("multiple TiKV instances are deployed at the same host but location label missing"),
			)
			continue
		}
		// every label must be declared in PD's location-labels config
		for lname := range ls {
			if !lbs.Exist(lname) {
				lerr.TiKVInstances[kv] = append(
					lerr.TiKVInstances[kv],
					fmt.Errorf("label name '%s' is not specified in pd config (replication.location-labels: %v)", lname, pdLocLabels),
				)
			}
		}
	}

	if len(lerr.TiKVInstances) == 0 {
		return nil
	}
	return lerr
}

// platformConflictsDetect checks for conflicts in topology for different OS / Arch
// set to the same host / IP
func (s *Specification) platformConflictsDetect() error {
	type (
		conflict struct {
			os   string
			arch string
			cfg  string
		}
	)

	platformStats := map[string]conflict{}
	topoSpec := reflect.ValueOf(s).Elem()
	topoType := reflect.TypeOf(s).Elem()

	for i := 0; i < topoSpec.NumField(); i++ {
		if isSkipField(topoSpec.Field(i)) {
			continue
		}

		compSpecs := topoSpec.Field(i)
		for index := 0; index < compSpecs.Len(); index++ {
			compSpec := reflect.Indirect(compSpecs.Index(index))
			// skip nodes imported from TiDB-Ansible
			if compSpec.Addr().Interface().(InstanceSpec).IsImported() {
				continue
			}
			// check hostname
			host := compSpec.FieldByName("Host").String()
			cfg := strings.Split(topoType.Field(i).Tag.Get("yaml"), ",")[0] // without meta
			if host == "" {
				return errors.Errorf("`%s` contains empty host field", cfg)
			}

			// platform conflicts
			stat := conflict{
				cfg: cfg,
			}
			if j, found := findField(compSpec, "OS"); found {
				stat.os = compSpec.Field(j).String()
			}
			if j, found := findField(compSpec, "Arch"); found {
				stat.arch = compSpec.Field(j).String()
			}

			prev, exist := platformStats[host]
			if exist {
				if prev.os != stat.os || prev.arch != stat.arch {
					return &meta.ValidateErr{
						Type:   meta.TypeMismatch,
						Target: "platform",
						LHS:    fmt.Sprintf("%s:%s/%s", prev.cfg, prev.os, prev.arch),
						RHS:    fmt.Sprintf("%s:%s/%s", stat.cfg, stat.os, stat.arch),
						Value:  host,
					}
				}
			}
			platformStats[host] = stat
		}
	}
	return nil
}

// portInvalidDetect checks that every *Port field of every component spec is
// within [1, 65535] (NgPort additionally allows 0 = default and -1 = disable).
func (s *Specification) portInvalidDetect() error {
	topoSpec := reflect.ValueOf(s).Elem()
	topoType := reflect.TypeOf(s).Elem()

	// checkPort validates all "...Port" fields of one component spec struct
	checkPort := func(idx int, compSpec reflect.Value) error {
		compSpec = reflect.Indirect(compSpec)
		cfg := strings.Split(topoType.Field(idx).Tag.Get("yaml"), ",")[0]

		for i := 0; i < compSpec.NumField(); i++ {
			if strings.HasSuffix(compSpec.Type().Field(i).Name, "Port") {
				port := int(compSpec.Field(i).Int())
				// for NgPort, 0 means default and -1 means disable
				if compSpec.Type().Field(i).Name == "NgPort" && (port == -1 || port == 0) {
					continue
				}
				if port < 1 || port > 65535 {
					portField := strings.Split(compSpec.Type().Field(i).Tag.Get("yaml"), ",")[0]
					return errors.Errorf("`%s` of %s=%d is invalid, port should be in the range [1, 65535]", cfg, portField, port)
				}
			}
		}
		return nil
	}

	for i := 0; i < topoSpec.NumField(); i++ {
		compSpecs := topoSpec.Field(i)

		// check on struct
		if compSpecs.Kind() == reflect.Struct {
			if err := checkPort(i, compSpecs); err != nil {
				return err
			}
			continue
		}

		// check on slice
		for index := 0; index < compSpecs.Len(); index++ {
			compSpec := reflect.Indirect(compSpecs.Index(index))
			if err := checkPort(i, compSpec); err != nil {
				return err
			}
		}
	}

	return nil
}

// portConflictsDetect checks that no two instances (or monitoring agents) on
// the same host claim the same port.
func (s *Specification) portConflictsDetect() error {
	type (
		usedPort struct {
			host string
			port int
		}
		conflict struct {
			tp  string
			cfg string
		}
	)

	portTypes := []string{
		"Port",
		"StatusPort",
		"PeerPort",
		"ClientPort",
		"WebPort",
		"TCPPort",
		"HTTPPort",
		"FlashServicePort",
		"FlashProxyPort",
		"FlashProxyStatusPort",
		"ClusterPort",
		"NgPort",
	}

	portStats := map[usedPort]conflict{}
	uniqueHosts := set.NewStringSet()
	topoSpec := reflect.ValueOf(s).Elem()
	topoType := reflect.TypeOf(s).Elem()
	for i := 0; i < topoSpec.NumField(); i++ {
		if isSkipField(topoSpec.Field(i)) {
			continue
		}

		compSpecs := topoSpec.Field(i)
		for index := 0; index < compSpecs.Len(); index++ {
			compSpec := reflect.Indirect(compSpecs.Index(index))
			// check hostname
			host := compSpec.FieldByName("Host").String()
			cfg := strings.Split(topoType.Field(i).Tag.Get("yaml"), ",")[0] // without meta
			if host == "" {
				return errors.Errorf("`%s` contains empty host field", cfg)
			}
			uniqueHosts.Insert(host)

			// Ports conflicts
			for _, portType := range portTypes {
				if j, found := findField(compSpec, portType); found {
					item := usedPort{
						host: host,
						port: int(compSpec.Field(j).Int()),
					}
					tp := compSpec.Type().Field(j).Tag.Get("yaml")
					prev, exist := portStats[item]
					if exist {
						return &meta.ValidateErr{
							Type:   meta.TypeConflict,
							Target: "port",
							LHS:    fmt.Sprintf("%s:%s.%s", prev.cfg, item.host, prev.tp),
							RHS:    fmt.Sprintf("%s:%s.%s", cfg, item.host, tp),
							Value:  item.port,
						}
					}
					portStats[item] = conflict{
						tp:  tp,
						cfg: cfg,
					}
				}
			}
		}
	}

	// Port conflicts in monitored components
	monitoredPortTypes := []string{
		"NodeExporterPort",
		"BlackboxExporterPort",
	}
	monitoredOpt := topoSpec.FieldByName(monitorOptionTypeName)
	for host := range uniqueHosts {
		cfg := "monitored"
		for _, portType := range monitoredPortTypes {
			f := monitoredOpt.FieldByName(portType)
			item := usedPort{
				host: host,
				port: int(f.Int()),
			}
			ft, found := monitoredOpt.Type().FieldByName(portType)
			if !found {
				return errors.Errorf("incompatible change `%s.%s`", monitorOptionTypeName, portType)
			}
			// `yaml:"node_exporter_port,omitempty"`
			tp := strings.Split(ft.Tag.Get("yaml"), ",")[0]
			prev, exist := portStats[item]
			if exist {
				return &meta.ValidateErr{
					Type:   meta.TypeConflict,
					Target: "port",
					LHS:    fmt.Sprintf("%s:%s.%s", prev.cfg, item.host, prev.tp),
					RHS:    fmt.Sprintf("%s:%s.%s", cfg, item.host, tp),
					Value:  item.port,
				}
			}
			portStats[item] = conflict{
				tp:  tp,
				cfg: cfg,
			}
		}
	}

	return nil
}

// dirConflictsDetect checks that no two instances on the same host claim the
// same absolute data/deploy directory (imported nodes are tolerated among
// themselves).
func (s *Specification) dirConflictsDetect() error {
	type (
		usedDir struct {
			host string
			dir  string
		}
		conflict struct {
			tp       string
			cfg      string
			imported bool
		}
	)

	dirTypes := []string{
		"DataDir",
		"DeployDir",
	}

	// usedInfo => type
	var dirStats = map[usedDir]conflict{}

	topoSpec := reflect.ValueOf(s).Elem()
	topoType := reflect.TypeOf(s).Elem()
	for i := 0; i < topoSpec.NumField(); i++ {
		if isSkipField(topoSpec.Field(i)) {
			continue
		}

		compSpecs := topoSpec.Field(i)
		for index := 0; index < compSpecs.Len(); index++ {
			compSpec := reflect.Indirect(compSpecs.Index(index))
			// check hostname
			host := compSpec.FieldByName("Host").String()
			cfg := strings.Split(topoType.Field(i).Tag.Get("yaml"), ",")[0] // without meta
			if host == "" {
				return errors.Errorf("`%s` contains empty host field", cfg)
			}

			// Directory conflicts
			for _, dirType := range dirTypes {
				j, found := findField(compSpec, dirType)
				if !found {
					continue
				}

				// `yaml:"data_dir,omitempty"`
				tp := strings.Split(compSpec.Type().Field(j).Tag.Get("yaml"), ",")[0]
				for dir := range strings.SplitSeq(compSpec.Field(j).String(), ",") {
					dir = strings.TrimSpace(dir)
					item := usedDir{
						host: host,
						dir:  dir,
					}
					// data_dir is relative to deploy_dir by default, so they can be with
					// same (sub) paths as long as the deploy_dirs are different
					if item.dir != "" && !strings.HasPrefix(item.dir, "/") {
						continue
					}
					prev, exist := dirStats[item]
					// not checking between imported nodes
					if exist && !(compSpec.Addr().Interface().(InstanceSpec).IsImported() && prev.imported) {
						return &meta.ValidateErr{
							Type:   meta.TypeConflict,
							Target: "directory",
							LHS:    fmt.Sprintf("%s:%s.%s", prev.cfg, item.host, prev.tp),
							RHS:    fmt.Sprintf("%s:%s.%s", cfg, item.host, tp),
							Value:  item.dir,
						}
					}
					// not reporting error for nodes imported from TiDB-Ansible, but keep
					// their dirs in the map to check if other nodes are using them
					dirStats[item] = conflict{
						tp:       tp,
						cfg:      cfg,
						imported: compSpec.Addr().Interface().(InstanceSpec).IsImported(),
					}
				}
			}
		}
	}

	return nil
}

// CountDir counts for dir paths used by any instance in the cluster with the same
// prefix, useful to find potential path conflicts
func (s *Specification) CountDir(targetHost, dirPrefix string) int {
	dirTypes := []string{
		"DeployDir",
		"DataDir",
		"LogDir",
	}

	// path -> count
	dirStats := make(map[string]int)
	count := 0
	topoSpec := reflect.ValueOf(s).Elem()
	dirPrefix = Abs(s.GlobalOptions.User, dirPrefix)

	// addHostDir records one absolute dir (relative dirs are resolved
	// against the instance's deploy dir) keyed by host+dir
	addHostDir := func(host, deployDir, dir string) {
		if !strings.HasPrefix(dir, "/") {
			dir = filepath.Join(deployDir, dir)
		}
		dir = Abs(s.GlobalOptions.User, dir)
		dirStats[host+dir]++
	}

	for i := 0; i < topoSpec.NumField(); i++ {
		if isSkipField(topoSpec.Field(i)) {
			continue
		}

		compSpecs := topoSpec.Field(i)
		for index := 0; index < compSpecs.Len(); index++ {
			compSpec := reflect.Indirect(compSpecs.Index(index))
			deployDir := compSpec.FieldByName("DeployDir").String()
			host := compSpec.FieldByName("Host").String()
			if compSpec.FieldByName("ManageHost").String() != "" {
				host = compSpec.FieldByName("ManageHost").String()
			}

			for _, dirType := range dirTypes {
				j, found := findField(compSpec, dirType)
				if !found {
					continue
				}

				dir := compSpec.Field(j).String()

				switch dirType { // the same as in instance.go for (*instance)
				case "DeployDir":
					addHostDir(host, deployDir, "")
				case "DataDir":
					// the default data_dir is relative to deploy_dir
					if dir == "" {
						addHostDir(host, deployDir, dir)
						continue
					}
					for dataDir := range strings.SplitSeq(dir, ",") {
						dataDir = strings.TrimSpace(dataDir)
						if dataDir != "" {
							addHostDir(host, deployDir, dataDir)
						}
					}
				case "LogDir":
					field := compSpec.FieldByName("LogDir")
					if field.IsValid() {
						dir = field.Interface().(string)
					}
					if dir == "" {
						dir = "log"
					}
					addHostDir(host, deployDir, strings.TrimSpace(dir))
				}
			}
		}
	}

	// sum counts of the exact prefix dir and anything below it on targetHost
	for k, v := range dirStats {
		if k == targetHost+dirPrefix || strings.HasPrefix(k, targetHost+dirPrefix+"/") {
			count += v
		}
	}

	return count
}

// validateTiSparkSpec checks TiSpark master/worker constraints.
func (s *Specification) validateTiSparkSpec() error {
	// There must be a Spark master
	if len(s.TiSparkMasters) == 0 {
		if len(s.TiSparkWorkers) == 0 {
			return nil
		}
		return ErrNoTiSparkMaster
	}

	// We only support 1 Spark master at present
	if len(s.TiSparkMasters) > 1 {
		return ErrMultipleTiSparkMaster
	}

	// Multiple workers on the same host is not supported by Spark
	if len(s.TiSparkWorkers) > 1 {
		cnt := make(map[string]int)
		for _, w := range s.TiSparkWorkers {
			if cnt[w.Host] > 0 {
				return errors.Annotatef(ErrMultipleTisparkWorker, "the host %s is duplicated", w.Host)
			}
			cnt[w.Host]++
		}
	}

	return nil
}

// validateTLSEnabled rejects components that have no TLS support when the
// cluster is TLS enabled.
func (s *Specification) validateTLSEnabled() error {
	if !s.GlobalOptions.TLSEnabled {
		return nil
	}

	// check for component with no tls support
	compList := make([]Component, 0)
	s.IterComponent(func(c Component) {
		if len(c.Instances()) > 0 {
			compList = append(compList, c)
		}
	})

	for _, c := range compList {
		switch c.Name() {
		case ComponentPD,
			ComponentTSO,
			ComponentScheduling,
			ComponentTiDB,
			ComponentTiKV,
			ComponentTiFlash,
			ComponentTiProxy,
			ComponentPump,
			ComponentDrainer,
			ComponentCDC,
			ComponentTiKVCDC,
			ComponentDashboard,
			ComponentPrometheus,
			ComponentAlertmanager,
			ComponentGrafana:
		default:
			return errors.Errorf("component %s is not supported in TLS enabled cluster", c.Name())
		}
	}
	return nil
}

// validateUserGroup checks the global user/group names against the Linux
// naming rules (reUser / reGroup).
func (s *Specification) validateUserGroup() error {
	gOpts := s.GlobalOptions
	if user := gOpts.User; !reUser.MatchString(user) {
		return errors.Annotatef(ErrUserOrGroupInvalid, "`global` of user='%s' is invalid", user)
	}
	// if group is nil, then we'll set it to the same as user
	if group := gOpts.Group; group != "" && !reGroup.MatchString(group) {
		return errors.Annotatef(ErrUserOrGroupInvalid, "`global` of group='%s' is invalid", group)
	}
	return nil
}

// validatePDNames rejects duplicated non-empty pd_servers names.
func (s *Specification) validatePDNames() error {
	// check pdserver name
	pdNames := set.NewStringSet()
	for _, pd := range s.PDServers {
		if pd.Name == "" {
			continue
		}

		if pdNames.Exist(pd.Name) {
			return errors.Errorf("component pd_servers.name is not supported duplicated, the name %s is duplicated", pd.Name)
		}
		pdNames.Insert(pd.Name)
	}
	return nil
}

// validateTSONames rejects duplicated non-empty tso_servers names.
func (s *Specification) validateTSONames() error {
	// check tso server name
	tsoNames := set.NewStringSet()
	for _, tso := range s.TSOServers {
		if tso.Name == "" {
			continue
		}

		if tsoNames.Exist(tso.Name) {
			return errors.Errorf("component tso_servers.name is not supported duplicated, the name %s is duplicated", tso.Name)
		}
		tsoNames.Insert(tso.Name)
	}
	return nil
}

// validateSchedulingNames rejects duplicated non-empty scheduling_servers names.
func (s *Specification) validateSchedulingNames() error {
	// check scheduling server name
	schedulingNames := set.NewStringSet()
	for _, scheduling := range s.SchedulingServers {
		if scheduling.Name == "" {
			continue
		}

		if schedulingNames.Exist(scheduling.Name) {
			return errors.Errorf("component scheduling_servers.name is not supported duplicated, the name %s is duplicated", scheduling.Name)
		}
		schedulingNames.Insert(scheduling.Name)
	}
	return nil
}

// validateTiFlashConfigs delegates per-instance config checks to each
// TiFlash instance.
func (s *Specification) validateTiFlashConfigs() error {
	c := FindComponent(s, ComponentTiFlash)
	for _, ins := range c.Instances() {
		if err := ins.(*TiFlashInstance).CheckIncorrectConfigs(); err != nil {
			return err
		}
	}
	return nil
}

// validateMonitorAgent checks for conflicts in topology for different ignore_exporter
// settings for multiple instances on the same host / IP
func (s *Specification) validateMonitorAgent() error {
	type (
		conflict struct {
			ignore bool
			cfg    string
		}
	)
	agentStats := map[string]conflict{}
	topoSpec := reflect.ValueOf(s).Elem()
	topoType := reflect.TypeOf(s).Elem()

	for i := 0; i < topoSpec.NumField(); i++ {
		if isSkipField(topoSpec.Field(i)) {
			continue
		}

		compSpecs := topoSpec.Field(i)
		for index := 0; index < compSpecs.Len(); index++ {
			compSpec := reflect.Indirect(compSpecs.Index(index))
			// skip nodes imported from TiDB-Ansible
			if compSpec.Addr().Interface().(InstanceSpec).IsImported() {
				continue
			}
			// check hostname
			host := compSpec.FieldByName("Host").String()
			cfg := strings.Split(topoType.Field(i).Tag.Get("yaml"), ",")[0] // without meta
			if host == "" {
				return errors.Errorf("`%s` contains empty host field", cfg)
			}

			// agent conflicts
			stat := conflict{}
			if j, found := findField(compSpec, "IgnoreExporter"); found {
				stat.ignore = compSpec.Field(j).Bool()
				stat.cfg = cfg
			}

			prev, exist := agentStats[host]
			if exist {
				if prev.ignore != stat.ignore {
					return &meta.ValidateErr{
						Type:   meta.TypeMismatch,
						Target: "ignore_exporter",
						LHS:    fmt.Sprintf("%s:%v", prev.cfg, prev.ignore),
						RHS:    fmt.Sprintf("%s:%v", stat.cfg, stat.ignore),
						Value:  host,
					}
				}
			}
			agentStats[host] = stat
		}
	}
	return nil
}

// Validate validates the topology specification and produce error if the
// specification invalid (e.g: port conflicts or directory conflicts)
func (s *Specification) Validate() error {
	validators := []func() error{
		s.validateTLSEnabled,
		s.platformConflictsDetect,
		s.portInvalidDetect,
		s.portConflictsDetect,
		s.dirConflictsDetect,
		s.validateUserGroup,
		s.validatePDNames,
		s.validateTSONames,
		s.validateSchedulingNames,
		s.validateTiSparkSpec,
		s.validateTiFlashConfigs,
		s.validateMonitorAgent,
	}

	for _, v := range validators {
		if err := v(); err != nil {
			return err
		}
	}

	return RelativePathDetect(s, isSkipField)
}

// RelativePathDetect detect if some specific path is relative path and report error
func RelativePathDetect(topo any, isSkipField func(reflect.Value) bool) error {
	pathTypes := []string{
		"ConfigFilePath",
		"RuleDir",
		"DashboardDir",
	}

	topoSpec := reflect.ValueOf(topo).Elem()

	for i := 0; i < topoSpec.NumField(); i++ {
		if isSkipField(topoSpec.Field(i)) {
			continue
		}

		compSpecs := topoSpec.Field(i)
		for index := 0; index < compSpecs.Len(); index++ {
			compSpec := reflect.Indirect(compSpecs.Index(index))

			// Relative path detect
			for _, field := range pathTypes {
				if j, found := findField(compSpec, field); found {
					// `yaml:"xxxx,omitempty"`
					fieldName := strings.Split(compSpec.Type().Field(j).Tag.Get("yaml"), ",")[0]
					localPath := compSpec.Field(j).String()
					if localPath != "" && !strings.HasPrefix(localPath, "/") {
						return fmt.Errorf("relative path is not allowed for field %s: %s", fieldName, localPath)
					}
				}
			}
		}
	}

	return nil
}
tiup-1.16.3/pkg/cluster/spec/validate_test.go000066400000000000000000001021641505422223000211350ustar00rootroot00000000000000// Copyright 2020 PingCAP,
Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package spec import ( "bytes" "fmt" "os" "path/filepath" "testing" "github.com/joomcode/errorx" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/utils" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) func TestDirectoryConflicts1(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tidb_servers: - host: 172.16.5.138 deploy_dir: "/test-1" pd_servers: - host: 172.16.5.138 data_dir: "/test-1" `), &topo) require.Error(t, err) require.Equal(t, "directory conflict for '/test-1' between 'tidb_servers:172.16.5.138.deploy_dir' and 'pd_servers:172.16.5.138.data_dir'", err.Error()) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "/test-data" tikv_servers: - host: 172.16.5.138 data_dir: "test-1" pd_servers: - host: 172.16.5.138 data_dir: "test-1" `), &topo) require.NoError(t, err) // report conflict if a non-import node use same dir as an imported one err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: deploy data_dir: data tidb_servers: - host: 172.16.4.190 deploy_dir: /home/tidb/deploy pd_servers: - host: 172.16.4.190 imported: true name: pd_ip-172-16-4-190 deploy_dir: /home/tidb/deploy data_dir: /home/tidb/deploy/data.pd log_dir: /home/tidb/deploy/log `), &topo) require.Error(t, err) require.Equal(t, "directory conflict for '/home/tidb/deploy' between 'tidb_servers:172.16.4.190.deploy_dir' and 
'pd_servers:172.16.4.190.deploy_dir'", err.Error()) // two imported tidb pass the validation, two pd servers (only one is imported) don't err = yaml.Unmarshal([]byte(` global: user: "test2" ssh_port: 220 deploy_dir: deploy data_dir: /data tidb_servers: - host: 172.16.4.190 imported: true port: 3306 deploy_dir: /home/tidb/deploy1 - host: 172.16.4.190 imported: true status_port: 3307 deploy_dir: /home/tidb/deploy1 pd_servers: - host: 172.16.4.190 imported: true name: pd_ip-172-16-4-190 deploy_dir: /home/tidb/deploy - host: 172.16.4.190 name: pd_ip-172-16-4-190-2 client_port: 2381 peer_port: 2382 deploy_dir: /home/tidb/deploy `), &topo) require.Error(t, err) require.Equal(t, "directory conflict for '/home/tidb/deploy' between 'pd_servers:172.16.4.190.deploy_dir' and 'pd_servers:172.16.4.190.deploy_dir'", err.Error()) } func TestPortConflicts(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tidb_servers: - host: 172.16.5.138 port: 1234 tikv_servers: - host: 172.16.5.138 status_port: 1234 `), &topo) require.Error(t, err) require.Equal(t, "port conflict for '1234' between 'tidb_servers:172.16.5.138.port' and 'tikv_servers:172.16.5.138.status_port'", err.Error()) topo = Specification{} err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tiflash_servers: - host: 172.16.5.138 tcp_port: 111 http_port: 222 flash_service_port: 1234 flash_proxy_port: 444 flash_proxy_status_port: 555 metrics_port: 666 - host: 172.16.5.138 tcp_port: 1111 http_port: 1222 flash_service_port: 1333 flash_proxy_port: 1444 flash_proxy_status_port: 1234 metrics_port: 1666 `), &topo) require.Error(t, err) require.Equal(t, "port conflict for '1234' between 'tiflash_servers:172.16.5.138.flash_service_port' and 'tiflash_servers:172.16.5.138.flash_proxy_status_port'", err.Error()) topo = Specification{} err = yaml.Unmarshal([]byte(` global: 
user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tiflash_servers: - host: 172.16.5.138 tcp_port: 111 http_port: 222 flash_service_port: 333 flash_proxy_port: 1234 flash_proxy_status_port: 555 metrics_port: 666 - host: 172.16.5.138 tcp_port: 1111 http_port: 1222 flash_service_port: 1333 flash_proxy_port: 1234 flash_proxy_status_port: 1555 metrics_port: 1666 `), &topo) require.Error(t, err) require.Equal(t, "port conflict for '1234' between 'tiflash_servers:172.16.5.138.flash_proxy_port' and 'tiflash_servers:172.16.5.138.flash_proxy_port'", err.Error()) topo = Specification{} // tispark_masters has "omitempty" in its tag value err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 1234 tispark_masters: - host: 172.16.5.138 port: 1234 tikv_servers: - host: 172.16.5.138 status_port: 2345 `), &topo) require.Error(t, err) require.Equal(t, "port conflict for '1234' between 'tispark_masters:172.16.5.138.port' and 'monitored:172.16.5.138.node_exporter_port'", err.Error()) } func TestPlatformConflicts(t *testing.T) { // aarch64 and arm64 are equal topo := Specification{} err := yaml.Unmarshal([]byte(` global: os: "linux" arch: "aarch64" tidb_servers: - host: 172.16.5.138 arch: "arm64" os: "linux" tikv_servers: - host: 172.16.5.138 arch: "arm64" os: "linux" `), &topo) require.NoError(t, err) // different arch defined for the same host topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" tidb_servers: - host: 172.16.5.138 arch: "aarch64" os: "linux" tikv_servers: - host: 172.16.5.138 arch: "amd64" os: "linux" `), &topo) require.Error(t, err) require.Equal(t, "platform mismatch for '172.16.5.138' between 'tidb_servers:linux/arm64' and 'tikv_servers:linux/amd64'", err.Error()) // different os defined for the same host topo = Specification{} err = yaml.Unmarshal([]byte(` global: os: "linux" arch: "aarch64" tidb_servers: - host: 172.16.5.138 os: "darwin" arch: "arm64" tikv_servers: - host: 172.16.5.138 arch: "arm64" os: "linux" 
`), &topo) require.Error(t, err) require.Equal(t, "platform mismatch for '172.16.5.138' between 'tidb_servers:darwin/arm64' and 'tikv_servers:linux/arm64'", err.Error()) } func TestCountDir(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" tikv_servers: - host: 172.16.5.138 data_dir: "/test-data/data-1" pd_servers: - host: 172.16.5.138 data_dir: "/test-data/data-2" `), &topo) require.NoError(t, err) cnt := topo.CountDir("172.16.5.138", "/home/test1/test-deploy/pd-2379") require.Equal(t, 2, cnt) cnt = topo.CountDir("172.16.5.138", "") // the default user home require.Equal(t, 4, cnt) cnt = topo.CountDir("172.16.5.138", "/test-data/data") require.Equal(t, 0, cnt) // should not match partial path err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "/test-deploy" tikv_servers: - host: 172.16.5.138 data_dir: "/test-data/data-1" pd_servers: - host: 172.16.5.138 data_dir: "/test-data/data-2" `), &topo) require.NoError(t, err) cnt = topo.CountDir("172.16.5.138", "/test-deploy/pd-2379") require.Equal(t, 2, cnt) cnt = topo.CountDir("172.16.5.138", "") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.16.5.138", "test-data") require.Equal(t, 0, cnt) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "/test-deploy" data_dir: "/test-data" tikv_servers: - host: 172.16.5.138 data_dir: "data-1" pd_servers: - host: 172.16.5.138 data_dir: "data-2" - host: 172.16.5.139 `), &topo) require.NoError(t, err) // if per-instance data_dir is set, the global data_dir is ignored, and if it // is a relative path, it will be under the instance's deploy_dir cnt = topo.CountDir("172.16.5.138", "/test-deploy/pd-2379") require.Equal(t, 3, cnt) cnt = topo.CountDir("172.16.5.138", "") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.16.5.139", "/test-data") require.Equal(t, 1, cnt) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: deploy 
data_dir: data tidb_servers: - host: 172.16.4.190 imported: true deploy_dir: /home/tidb/deploy pd_servers: - host: 172.16.4.190 imported: true name: pd_ip-172-16-4-190 deploy_dir: /home/tidb/deploy data_dir: /home/tidb/deploy/data.pd log_dir: /home/tidb/deploy/log `), &topo) require.NoError(t, err) cnt = topo.CountDir("172.16.4.190", "/home/tidb/deploy") require.Equal(t, 5, cnt) } func TestCountDir2(t *testing.T) { file := filepath.Join("testdata", "countdir.yaml") meta := ClusterMeta{} yamlFile, err := os.ReadFile(file) require.NoError(t, err) decoder := yaml.NewDecoder(bytes.NewReader(yamlFile)) decoder.KnownFields(true) err = decoder.Decode(&meta) require.NoError(t, err) topo := meta.Topology // If the imported dir is somehow containing paths ens with slash, // or having multiple slash in it, the count result should not // be different. cnt := topo.CountDir("172.17.0.4", "/foo/bar/sometidbpath123") require.Equal(t, 3, cnt) cnt = topo.CountDir("172.17.0.4", "/foo/bar/sometidbpath123/") require.Equal(t, 3, cnt) cnt = topo.CountDir("172.17.0.4", "/foo/bar/sometidbpath123//") require.Equal(t, 3, cnt) cnt = topo.CountDir("172.17.0.4", "/foo/bar/sometidbpath123/log") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.17.0.4", "/foo/bar/sometidbpath123//log") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.17.0.4", "/foo/bar/sometidbpath123/log/") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.17.0.4", "/foo/bar/sometidbpath123//log/") require.Equal(t, 1, cnt) } func TestTiSparkSpecValidation(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 peer_port: 1234 tispark_masters: - host: 172.16.5.138 port: 1235 tispark_workers: - host: 172.16.5.138 port: 1236 - host: 172.16.5.139 port: 1235 `), &topo) require.NoError(t, err) topo = Specification{} err = yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 peer_port: 1234 tispark_masters: - host: 172.16.5.138 port: 1235 - host: 172.16.5.139 port: 1235 `), &topo) 
require.Error(t, err) require.Equal(t, "a TiSpark enabled cluster with more than 1 Spark master node is not supported", err.Error()) topo = Specification{} err = yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 peer_port: 1234 tispark_workers: - host: 172.16.5.138 port: 1235 - host: 172.16.5.139 port: 1235 `), &topo) require.Error(t, err) require.Equal(t, "there must be a Spark master node if you want to use the TiSpark component", err.Error()) err = yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 peer_port: 1234 tispark_masters: - host: 172.16.5.138 port: 1236 tispark_workers: - host: 172.16.5.138 port: 1235 - host: 172.16.5.139 port: 1235 - host: 172.16.5.139 port: 1236 web_port: 8089 `), &topo) require.Error(t, err) require.Equal(t, "the host 172.16.5.139 is duplicated: multiple TiSpark workers on the same host is not supported by Spark", err.Error()) } func TestTLSEnabledValidation(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: enable_tls: true pd_servers: - host: 172.16.5.138 peer_port: 1234 tidb_servers: - host: 172.16.5.138 port: 1235 tikv_servers: - host: 172.16.5.138 port: 1236 `), &topo) require.NoError(t, err) topo = Specification{} err = yaml.Unmarshal([]byte(` global: enable_tls: true tidb_servers: - host: 172.16.5.138 port: 1234 tispark_masters: - host: 172.16.5.138 port: 1235 - host: 172.16.5.139 port: 1235 `), &topo) require.Error(t, err) require.Equal(t, "component tispark is not supported in TLS enabled cluster", err.Error()) } func TestMonitorAgentValidation(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 port: 1234 - host: 172.16.5.139 ignore_exporter: true `), &topo) require.NoError(t, err) topo = Specification{} err = yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 port: 1234 tikv_servers: - host: 172.16.5.138 ignore_exporter: true `), &topo) require.Error(t, err) require.Equal(t, "ignore_exporter mismatch for '172.16.5.138' 
between 'tikv_servers:true' and 'pd_servers:false'", err.Error()) } func TestCrossClusterPortConflicts(t *testing.T) { topo1 := Specification{} err := yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 client_port: 1234 peer_port: 1235 `), &topo1) require.NoError(t, err) topo2 := Specification{} err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 9101 blackbox_exporter_port: 9116 pd_servers: - host: 172.16.5.138 client_port: 2234 peer_port: 2235 - host: 172.16.5.139 client_port: 2236 peer_port: 2237 ignore_exporter: true `), &topo2) require.NoError(t, err) clsList := make(map[string]Metadata) // no port conflict with empty list err = CheckClusterPortConflict(clsList, "topo", &topo2) require.NoError(t, err) clsList["topo1"] = &ClusterMeta{Topology: &topo1} // no port conflict err = CheckClusterPortConflict(clsList, "topo", &topo2) require.NoError(t, err) // add topo2 to the list clsList["topo2"] = &ClusterMeta{Topology: &topo2} // monitoring agent port conflict topo3 := Specification{} err = yaml.Unmarshal([]byte(` tidb_servers: - host: 172.16.5.138 `), &topo3) require.NoError(t, err) err = CheckClusterPortConflict(clsList, "topo", &topo3) require.Error(t, err) require.Equal(t, "spec.deploy.port_conflict: Deploy port conflicts to an existing cluster", errors.Cause(err).Error()) suggestion, ok := errorx.ExtractProperty(err, utils.ErrPropSuggestion) require.True(t, ok) require.Equal(t, `The port you specified in the topology file is: Port: 9100 Component: monitor 172.16.5.138 It conflicts to a port in the existing cluster: Existing Cluster Name: topo1 Existing Port: 9100 Existing Component: monitor 172.16.5.138 Please change to use another port or another host.`, suggestion) // monitoring agent port conflict but the instance marked as ignore_exporter topo3 = Specification{} err = yaml.Unmarshal([]byte(` tidb_servers: - host: 172.16.5.138 ignore_exporter: true `), &topo3) require.NoError(t, err) err = CheckClusterPortConflict(clsList, "topo", &topo3) 
require.NoError(t, err) // monitoring agent port conflict but the existing instance marked as ignore_exporter topo3 = Specification{} err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 9102 blackbox_exporter_port: 9116 tidb_servers: - host: 172.16.5.139 `), &topo3) require.NoError(t, err) err = CheckClusterPortConflict(clsList, "topo", &topo3) require.NoError(t, err) // component port conflict topo4 := Specification{} err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 9102 blackbox_exporter_port: 9117 pump_servers: - host: 172.16.5.138 port: 2235 `), &topo4) require.NoError(t, err) err = CheckClusterPortConflict(clsList, "topo", &topo4) require.Error(t, err) require.Equal(t, "spec.deploy.port_conflict: Deploy port conflicts to an existing cluster", errors.Cause(err).Error()) suggestion, ok = errorx.ExtractProperty(err, utils.ErrPropSuggestion) require.True(t, ok) require.Equal(t, `The port you specified in the topology file is: Port: 2235 Component: pump 172.16.5.138 It conflicts to a port in the existing cluster: Existing Cluster Name: topo2 Existing Port: 2235 Existing Component: pd 172.16.5.138 Please change to use another port or another host.`, suggestion) } func TestCrossClusterDirConflicts(t *testing.T) { topo1 := Specification{} err := yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 client_port: 1234 peer_port: 1235 `), &topo1) require.NoError(t, err) topo2 := Specification{} err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 9101 blackbox_exporter_port: 9116 pd_servers: - host: 172.16.5.138 client_port: 2234 peer_port: 2235 `), &topo2) require.NoError(t, err) clsList := make(map[string]Metadata) // no port conflict with empty list err = CheckClusterDirConflict(clsList, "topo", &topo2) require.NoError(t, err) clsList["topo1"] = &ClusterMeta{Topology: &topo1} // no port conflict err = CheckClusterDirConflict(clsList, "topo", &topo2) require.NoError(t, err) // add topo2 to the list clsList["topo2"] = 
&ClusterMeta{Topology: &topo2} // monitoring agent dir conflict topo3 := Specification{} err = yaml.Unmarshal([]byte(` tidb_servers: - host: 172.16.5.138 `), &topo3) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo3) require.Error(t, err) require.Equal(t, "spec.deploy.dir_conflict: Deploy directory conflicts to an existing cluster", errors.Cause(err).Error()) suggestion, ok := errorx.ExtractProperty(err, utils.ErrPropSuggestion) require.True(t, ok) require.Equal(t, `The directory you specified in the topology file is: Directory: monitor deploy directory /home/tidb/deploy/monitor-9100 Component: tidb 172.16.5.138 It conflicts to a directory in the existing cluster: Existing Cluster Name: topo1 Existing Directory: monitor deploy directory /home/tidb/deploy/monitor-9100 Existing Component: pd 172.16.5.138 Please change to use another directory or another host.`, suggestion) // no dir conflict error if one of the instance is marked as ignore_exporter err = yaml.Unmarshal([]byte(` tidb_servers: - host: 172.16.5.138 ignore_exporter: true `), &topo3) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo3) require.NoError(t, err) // component with different port has no dir conflict topo4 := Specification{} err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 9102 blackbox_exporter_port: 9117 pump_servers: - host: 172.16.5.138 port: 2235 `), &topo4) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo4) require.NoError(t, err) // component with relative dir has no dir conflic err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 9102 blackbox_exporter_port: 9117 pump_servers: - host: 172.16.5.138 port: 2235 data_dir: "pd-1234" `), &topo4) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo4) require.NoError(t, err) // component with absolute dir conflict err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 9102 blackbox_exporter_port: 9117 pump_servers: 
- host: 172.16.5.138 port: 2235 data_dir: "/home/tidb/deploy/pd-2234" `), &topo4) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo4) require.Error(t, err) require.Equal(t, "spec.deploy.dir_conflict: Deploy directory conflicts to an existing cluster", errors.Cause(err).Error()) suggestion, ok = errorx.ExtractProperty(err, utils.ErrPropSuggestion) require.True(t, ok) require.Equal(t, `The directory you specified in the topology file is: Directory: data directory /home/tidb/deploy/pd-2234 Component: pump 172.16.5.138 It conflicts to a directory in the existing cluster: Existing Cluster Name: topo2 Existing Directory: deploy directory /home/tidb/deploy/pd-2234 Existing Component: pd 172.16.5.138 Please change to use another directory or another host.`, suggestion) } func TestRelativePathDetect(t *testing.T) { servers := map[string]string{ "monitoring_servers": "rule_dir", "grafana_servers": "dashboard_dir", "alertmanager_servers": "config_file", } paths := map[string]bool{ "/an/absolute/path": true, "an/relative/path": false, "./an/relative/path": false, "../an/relative/path": false, } for server, field := range servers { for p, expectNil := range paths { topo5 := Specification{} content := fmt.Sprintf(` %s: - host: 1.1.1.1 %s: %s `, server, field, p) if expectNil { require.Nil(t, yaml.Unmarshal([]byte(content), &topo5)) } else { require.NotNil(t, yaml.Unmarshal([]byte(content), &topo5)) } } } } func TestTiKVLocationLabelsCheck(t *testing.T) { // 2 tikv on different host topo := Specification{} err := yaml.Unmarshal([]byte(` tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 - host: 172.16.5.139 port: 20160 status_port: 20180 `), &topo) require.NoError(t, err) err = CheckTiKVLabels(nil, &topo) require.NoError(t, err) err = CheckTiKVLabels([]string{}, &topo) require.NoError(t, err) // 2 tikv on the same host without label topo = Specification{} err = yaml.Unmarshal([]byte(` tikv_servers: - host: 172.16.5.140 port: 20160 
status_port: 20180 - host: 172.16.5.140 port: 20161 status_port: 20181 `), &topo) require.NoError(t, err) err = CheckTiKVLabels(nil, &topo) require.Error(t, err) // 2 tikv on the same host with unacquainted label topo = Specification{} err = yaml.Unmarshal([]byte(` tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 config: server.labels: { zone: "zone1", host: "172.16.5.140" } - host: 172.16.5.140 port: 20161 status_port: 20181 config: server.labels: { zone: "zone1", host: "172.16.5.140" } `), &topo) require.NoError(t, err) err = CheckTiKVLabels(nil, &topo) require.Error(t, err) // 2 tikv on the same host with correct label topo = Specification{} err = yaml.Unmarshal([]byte(` tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 config: server.labels: { zone: "zone1", host: "172.16.5.140" } - host: 172.16.5.140 port: 20161 status_port: 20181 config: server.labels: { zone: "zone1", host: "172.16.5.140" } `), &topo) require.NoError(t, err) err = CheckTiKVLabels([]string{"zone", "host"}, &topo) require.NoError(t, err) // 2 tikv on the same host with different config style topo = Specification{} err = yaml.Unmarshal([]byte(` tikv_servers: - host: 172.16.5.140 port: 20160 status_port: 20180 config: server: labels: { zone: "zone1", host: "172.16.5.140" } - host: 172.16.5.140 port: 20161 status_port: 20181 config: server.labels: zone: "zone1" host: "172.16.5.140" `), &topo) require.NoError(t, err) err = CheckTiKVLabels([]string{"zone", "host"}, &topo) require.NoError(t, err) } func TestCountDirMultiPath(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" tiflash_servers: - host: 172.19.0.104 data_dir: "/home/tidb/birdstorm/data1, /home/tidb/birdstorm/data3" `), &topo) require.NoError(t, err) cnt := topo.CountDir("172.19.0.104", "/home/tidb/birdstorm/data1") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm/data2") require.Equal(t, 0, 
cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm/data3") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm") require.Equal(t, 2, cnt) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" tiflash_servers: - host: 172.19.0.104 data_dir: "birdstorm/data1,/birdstorm/data3" `), &topo) require.NoError(t, err) cnt = topo.CountDir("172.19.0.104", "/home/test1/test-deploy/tiflash-9000/birdstorm/data1") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.19.0.104", "/birdstorm/data3") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm/data3") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.19.0.104", "/home/test1/test-deploy/tiflash-9000/birdstorm/data3") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.19.0.104", "/birdstorm") require.Equal(t, 1, cnt) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" tiflash_servers: - host: 172.19.0.104 data_dir: /data1 # this is ignored config: # test with these paths storage.main.dir: [ /home/tidb/birdstorm/data1,/home/tidb/birdstorm/data3] `), &topo) require.NoError(t, err) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm/data1") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm/data2") require.Equal(t, 0, cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm/data3") require.Equal(t, 1, cnt) cnt = topo.CountDir("172.19.0.104", "/home/tidb/birdstorm") require.Equal(t, 2, cnt) } func TestDirectoryConflictsWithMultiDir(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tiflash_servers: - host: 172.16.5.138 data_dir: " /test-1, /test-2" pd_servers: - host: 172.16.5.138 data_dir: "/test-2" `), &topo) require.Error(t, err) require.Equal(t, 
"directory conflict for '/test-2' between 'tiflash_servers:172.16.5.138.data_dir' and 'pd_servers:172.16.5.138.data_dir'", err.Error()) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tiflash_servers: - host: 172.16.5.138 data_dir: "/test-1,/test-1" pd_servers: - host: 172.16.5.138 data_dir: "/test-2" `), &topo) require.Error(t, err) require.Equal(t, "directory conflict for '/test-1' between 'tiflash_servers:172.16.5.138.data_dir' and 'tiflash_servers:172.16.5.138.data_dir'", err.Error()) } func TestDirectoryConflictsWithTiFlashMultiDir2(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tiflash_servers: - host: 172.16.5.138 data_dir: "/test-1" # this will be overwrite by storage.main.dir config: storage.main.dir: [ /test-1, /test-2] pd_servers: - host: 172.16.5.138 data_dir: "/test-2" `), &topo) require.Error(t, err) require.Equal(t, "directory conflict for '/test-2' between 'tiflash_servers:172.16.5.138.data_dir' and 'pd_servers:172.16.5.138.data_dir'", err.Error()) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tiflash_servers: - host: 172.16.5.138 # this will be overwrite by storage.main.dir data_dir: "/test-1" config: storage.main.dir: [ /test-2, /test-2 ] # conflict inside pd_servers: - host: 172.16.5.138 data_dir: "/test-1" `), &topo) require.Error(t, err) require.Equal(t, "directory conflict for '/test-2' between 'tiflash_servers:172.16.5.138.config.storage.main.dir' and 'tiflash_servers:172.16.5.138.config.storage.main.dir'", err.Error()) err = yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: "test-deploy" data_dir: "test-data" tiflash_servers: - host: 172.16.5.138 data_dir: "/test-1" # this will be overwrite by storage.main.dir config: # no conflict between main and latest storage.main.dir: [ 
/test-1, /test-2] storage.latest.dir: [ /test-1, /test-2] pd_servers: - host: 172.16.5.138 data_dir: "/test-3" `), &topo) require.NoError(t, err) } func TestPdServerWithSameName(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 peer_port: 1234 name: name1 - host: 172.16.5.139 perr_port: 1234 name: name2 `), &topo) require.NoError(t, err) topo = Specification{} err = yaml.Unmarshal([]byte(` pd_servers: - host: 172.16.5.138 peer_port: 1234 name: name1 - host: 172.16.5.139 perr_port: 1234 name: name1 `), &topo) require.Error(t, err) require.Equal(t, "component pd_servers.name is not supported duplicated, the name name1 is duplicated", err.Error()) } func TestInvalidPort(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: ssh_port: 65536 `), &topo) require.Error(t, err) require.Equal(t, "`global` of ssh_port=65536 is invalid, port should be in the range [1, 65535]", err.Error()) err = yaml.Unmarshal([]byte(` global: ssh_port: 655 tidb_servers: - host: 172.16.5.138 port: -1 `), &topo) require.Error(t, err) require.Equal(t, "`tidb_servers` of port=-1 is invalid, port should be in the range [1, 65535]", err.Error()) err = yaml.Unmarshal([]byte(` monitored: node_exporter_port: 102400 `), &topo) require.Error(t, err) require.Equal(t, "`monitored` of node_exporter_port=102400 is invalid, port should be in the range [1, 65535]", err.Error()) } func TestInvalidUserGroup(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: helloworldtidb-_-_ group: wor_l-d `), &topo) require.NoError(t, err) require.Equal(t, "helloworldtidb-_-_", topo.GlobalOptions.User) require.Equal(t, "wor_l-d", topo.GlobalOptions.Group) topo = Specification{} err = yaml.Unmarshal([]byte(` global: user: ../hello `), &topo) require.Error(t, err) topo = Specification{} err = yaml.Unmarshal([]byte(` global: user: hel.lo `), &topo) require.Error(t, err) topo = Specification{} err = 
yaml.Unmarshal([]byte(` global: group: hello123456789012 `), &topo) require.Error(t, err) } func TestMissingGroup(t *testing.T) { topo := Specification{} err := yaml.Unmarshal([]byte(` global: user: tidb `), &topo) require.NoError(t, err) require.Equal(t, "tidb", topo.GlobalOptions.User) require.Equal(t, "", topo.GlobalOptions.Group) } func TestLogDirUnderDataDir(t *testing.T) { topo := Specification{} clsList := make(map[string]Metadata) err := yaml.Unmarshal([]byte(` global: user: "test1" ssh_port: 220 deploy_dir: deploy data_dir: data tikv_servers: - host: n1 port: 32160 status_port: 32180 log_dir: "/home/tidb6wu/tidb1-data/tikv-32160/log" data_dir: "/home/tidb6wu/tidb1-data/tikv-32160" `), &topo) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo) require.Error(t, err) require.Equal(t, "spec.deploy.dir_overlap: Deploy directory overlaps to another instance", err.Error()) suggestion, ok := errorx.ExtractProperty(err, utils.ErrPropSuggestion) require.True(t, ok) require.Equal(t, `The directory you specified in the topology file is: Directory: data directory /home/tidb6wu/tidb1-data/tikv-32160 Component: tikv n1 It overlaps to another instance: Other Directory: log directory /home/tidb6wu/tidb1-data/tikv-32160/log Other Component: tikv n1 Please modify the topology file and try again.`, suggestion) goodTopos := []string{ ` tikv_servers: - host: n1 log_dir: 'tikv-data/log' - host: n2 data_dir: 'tikv-data' `, ` tikv_servers: - host: n1 port: 32160 status_port: 32180 log_dir: "/home/tidb6wu/tidb1-data/tikv-32160-log" data_dir: "/home/tidb6wu/tidb1-data/tikv-32160" `, ` monitored: node_exporter_port: 9100 blackbox_exporter_port: 9115 deploy_dir: /data/deploy/monitor-9100 data_dir: /data/deploy/monitor-9100 log_dir: /data/deploy/monitor-9100/log pd_servers: - host: n0 name: pd0 imported: true deploy_dir: /data/deploy data_dir: /data/deploy/data.pd log_dir: /data/deploy/log - host: n1 name: pd1 log_dir: "/data/deploy/pd-2379/log" data_dir: 
"/data/pd-2379" deploy_dir: "/data/deploy/pd-2379" cdc_servers: - host: n1 port: 8300 deploy_dir: /data/deploy/ticdc-8300 data_dir: /data1/ticdc-8300 log_dir: /data/deploy/ticdc-8300/log `, } for _, s := range goodTopos { err = yaml.Unmarshal([]byte(s), &topo) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo) require.NoError(t, err) } overlapTopos := []string{ ` tikv_servers: - host: n1 log_dir: 'tikv-data/log' data_dir: 'tikv-data' `, ` tikv_servers: - host: n1 log_dir: '/home/tidb6wu/tidb1-data/tikv-32160/log' data_dir: '/home/tidb6wu/tidb1-data/tikv-32160' `, ` tikv_servers: - host: n1 log_dir: '/home/tidb6wu/tidb1-data/tikv-32160/log' data_dir: '/home/tidb6wu/tidb1-data/tikv-32160/log/data' `, ` tikv_servers: - host: n1 log_dir: 'tikv-log' data_dir: 'tikv-log/data' `, ` tikv_servers: - host: n1 data_dir: '/home/tidb6wu/tidb1-data/tikv-32160/log' tidb_servers: - host: n1 log_dir: '/home/tidb6wu/tidb1-data/tikv-32160/log/data' `, ` tikv_servers: - host: n1 log_dir: '/home/tidb6wu/tidb1-data/tikv-32160/log' tidb_servers: - host: n1 log_dir: '/home/tidb6wu/tidb1-data/tikv-32160/log/log' `, ` tikv_servers: - host: n1 data_dir: '/home/tidb6wu/tidb1-data/tikv-32160/data' pd_servers: - host: n1 data_dir: '/home/tidb6wu/tidb1-data/tikv-32160' `, ` global: user: "test1" deploy_dir: deploy data_dir: data tikv_servers: - host: n1 log_dir: "/home/test1/deploy/tikv-20160/ddd/log" data_dir: "ddd" `, ` global: user: "test1" deploy_dir: deploy data_dir: data tikv_servers: - host: n1 log_dir: "log" data_dir: "/home/test1/deploy/tikv-20160/log/data" `, } for _, s := range overlapTopos { err = yaml.Unmarshal([]byte(s), &topo) require.NoError(t, err) err = CheckClusterDirConflict(clsList, "topo", &topo) require.Error(t, err) require.Equal(t, "spec.deploy.dir_overlap: Deploy directory overlaps to another instance", err.Error()) } } 
tiup-1.16.3/pkg/cluster/task/000077500000000000000000000000001505422223000157625ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/task/action.go000066400000000000000000000036521505422223000175740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "crypto/tls" "fmt" "github.com/pingcap/errors" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" ) // ClusterOperate represents the cluster operation task. type ClusterOperate struct { spec *spec.Specification op operator.Operation options operator.Options tlsCfg *tls.Config } // Execute implements the Task interface func (c *ClusterOperate) Execute(ctx context.Context) error { var ( err error ) var opErrMsg = [...]string{ "failed to start", "failed to stop", "failed to restart", "failed to destroy", "failed to upgrade", "failed to scale in", "failed to scale out", "failed to destroy tombstone", } switch c.op { case operator.ScaleInOperation: err = operator.ScaleIn(ctx, c.spec, c.options, c.tlsCfg) // printStatus = false case operator.DestroyTombstoneOperation: _, err = operator.DestroyTombstone(ctx, c.spec, false, c.options, c.tlsCfg) default: return errors.Errorf("nonsupport %s", c.op) } if err != nil { return errors.Annotatef(err, "%s", opErrMsg[c.op]) } return nil } // Rollback implements the Task interface func (c *ClusterOperate) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c 
*ClusterOperate) String() string { return fmt.Sprintf("ClusterOperate: operation=%s, options=%+v", c.op, c.options) } tiup-1.16.3/pkg/cluster/task/backup_component.go000066400000000000000000000043761505422223000216520ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "bytes" "context" "fmt" "path/filepath" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // BackupComponent is used to copy all files related the specific version a component // to the target directory of path type BackupComponent struct { component string fromVer string host string deployDir string } // Execute implements the Task interface func (c *BackupComponent) Execute(ctx context.Context) error { // Copy to remote server exec, found := ctxt.GetInner(ctx).GetExecutor(c.host) if !found { return ErrNoExecutor } binDir := filepath.Join(c.deployDir, "bin") // Make upgrade idempotent // The old version has been backup if upgrade abort cmd := fmt.Sprintf(`test -d %[2]s || cp -r %[1]s %[2]s`, binDir, binDir+".old."+c.fromVer) _, stderr, err := exec.Execute(ctx, cmd, false) if err != nil { // ignore error if the source path does not exist, this is possible when // there are multiple instances share the same deploy_dir, typical case // is imported cluster // NOTE: by changing the behaviour to cp instead of mv in line 45, we don't // need to check "no such file" anymore, but I'm keeping it here in case // we got a better way handling the backups later if !(bytes.Contains(stderr, 
[]byte("No such file or directory")) || bytes.Contains(stderr, []byte("File exists"))) { return errors.Annotate(err, cmd) } } return nil } // Rollback implements the Task interface func (c *BackupComponent) Rollback(ctx context.Context) error { return nil } // String implements the fmt.Stringer interface func (c *BackupComponent) String() string { return fmt.Sprintf("BackupComponent: component=%s, currentVersion=%s, remote=%s:%s", c.component, c.fromVer, c.host, c.deployDir) } tiup-1.16.3/pkg/cluster/task/builder.go000066400000000000000000000365671505422223000177600ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "crypto/tls" "fmt" "path/filepath" "github.com/pingcap/tiup/pkg/cluster/executor" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/crypto" "github.com/pingcap/tiup/pkg/environment" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) // Builder is used to build TiUP task type Builder struct { tasks []Task Logger *logprinter.Logger } // NewBuilder returns a *Builder instance func NewBuilder(logger *logprinter.Logger) *Builder { return &Builder{Logger: logger} } // RootSSH appends a RootSSH task to the current task collection func (b *Builder) RootSSH( host string, port int, user, password, keyFile, passphrase string, sshTimeout, exeTimeout uint64, proxyHost string, proxyPort int, proxyUser, proxyPassword, proxyKeyFile, proxyPassphrase string, proxySSHTimeout uint64, sshType, defaultSSHType executor.SSHType, sudo bool, ) *Builder { if sshType == "" { sshType = defaultSSHType } b.tasks = append(b.tasks, &RootSSH{ host: host, port: port, user: user, password: password, keyFile: keyFile, passphrase: passphrase, timeout: sshTimeout, exeTimeout: exeTimeout, proxyHost: proxyHost, proxyPort: proxyPort, proxyUser: proxyUser, proxyPassword: proxyPassword, proxyKeyFile: proxyKeyFile, proxyPassphrase: proxyPassphrase, proxyTimeout: proxySSHTimeout, sshType: sshType, sudo: sudo, }) return b } // NewSimpleUerSSH append a UserSSH task to the current task collection with operator.Options and SSHConnectionProps func NewSimpleUerSSH(logger *logprinter.Logger, host string, port int, user string, gOpt operator.Options, p *tui.SSHConnectionProps, sshType executor.SSHType) *Builder { return NewBuilder(logger). 
UserSSH( host, port, user, gOpt.SSHTimeout, gOpt.OptTimeout, gOpt.SSHProxyHost, gOpt.SSHProxyPort, gOpt.SSHProxyUser, p.Password, p.IdentityFile, p.IdentityFilePassphrase, gOpt.SSHProxyTimeout, gOpt.SSHType, sshType, ) } // UserSSH append a UserSSH task to the current task collection func (b *Builder) UserSSH( host string, port int, deployUser string, sshTimeout, exeTimeout uint64, proxyHost string, proxyPort int, proxyUser, proxyPassword, proxyKeyFile, proxyPassphrase string, proxySSHTimeout uint64, sshType, defaultSSHType executor.SSHType, ) *Builder { if sshType == "" { sshType = defaultSSHType } b.tasks = append(b.tasks, &UserSSH{ host: host, port: port, deployUser: deployUser, timeout: sshTimeout, exeTimeout: exeTimeout, proxyHost: proxyHost, proxyPort: proxyPort, proxyUser: proxyUser, proxyPassword: proxyPassword, proxyKeyFile: proxyKeyFile, proxyPassphrase: proxyPassphrase, proxyTimeout: proxySSHTimeout, sshType: sshType, }) return b } // Func append a func task. func (b *Builder) Func(name string, fn func(ctx context.Context) error) *Builder { b.tasks = append(b.tasks, &Func{ name: name, fn: fn, }) return b } // ClusterSSH init all UserSSH need for the cluster. 
func (b *Builder) ClusterSSH( topo spec.Topology, deployUser string, sshTimeout, exeTimeout uint64, proxyHost string, proxyPort int, proxyUser, proxyPassword, proxyKeyFile, proxyPassphrase string, proxySSHTimeout uint64, sshType, defaultSSHType executor.SSHType, ) *Builder { if sshType == "" { sshType = defaultSSHType } var tasks []Task topo.IterInstance(func(inst spec.Instance) { tasks = append(tasks, &UserSSH{ host: inst.GetManageHost(), port: inst.GetSSHPort(), deployUser: deployUser, timeout: sshTimeout, exeTimeout: exeTimeout, proxyHost: proxyHost, proxyPort: proxyPort, proxyUser: proxyUser, proxyPassword: proxyPassword, proxyKeyFile: proxyKeyFile, proxyPassphrase: proxyPassphrase, proxyTimeout: proxySSHTimeout, sshType: sshType, }) }) b.tasks = append(b.tasks, &Parallel{inner: tasks}) return b } // UpdateMeta maintain the meta information func (b *Builder) UpdateMeta(cluster string, metadata *spec.ClusterMeta, deletedNodeIDs []string) *Builder { b.tasks = append(b.tasks, &UpdateMeta{ cluster: cluster, metadata: metadata, deletedNodeIDs: deletedNodeIDs, }) return b } // UpdateTopology maintain the topology information func (b *Builder) UpdateTopology(cluster, profile string, metadata *spec.ClusterMeta, deletedNodeIDs []string) *Builder { b.tasks = append(b.tasks, &UpdateTopology{ metadata: metadata, cluster: cluster, profileDir: profile, deletedNodeIDs: deletedNodeIDs, tcpProxy: proxy.GetTCPProxy(), }) return b } // CopyFile appends a CopyFile task to the current task collection func (b *Builder) CopyFile(src, dst, server string, download bool, limit int, compress bool) *Builder { b.tasks = append(b.tasks, &CopyFile{ src: src, dst: dst, remote: server, download: download, limit: limit, compress: compress, }) return b } // Download appends a Downloader task to the current task collection func (b *Builder) Download(component, os, arch string, version string) *Builder { b.tasks = append(b.tasks, NewDownloader(component, os, arch, version)) return b } // 
CopyComponent appends a CopyComponent task to the current task collection func (b *Builder) CopyComponent(component, os, arch string, version string, srcPath, dstHost, dstDir string, ) *Builder { b.tasks = append(b.tasks, &CopyComponent{ component: component, os: os, arch: arch, version: version, srcPath: srcPath, host: dstHost, dstDir: dstDir, }) return b } // InstallPackage appends a InstallPackage task to the current task collection func (b *Builder) InstallPackage(srcPath, dstHost, dstDir string) *Builder { b.tasks = append(b.tasks, &InstallPackage{ srcPath: srcPath, host: dstHost, dstDir: dstDir, }) return b } // BackupComponent appends a BackupComponent task to the current task collection func (b *Builder) BackupComponent(component, fromVer string, host, deployDir string) *Builder { b.tasks = append(b.tasks, &BackupComponent{ component: component, fromVer: fromVer, host: host, deployDir: deployDir, }) return b } // InitConfig appends a CopyComponent task to the current task collection func (b *Builder) InitConfig(clusterName, version string, specManager *spec.SpecManager, inst spec.Instance, deployUser string, ignoreCheck bool, paths meta.DirPaths) *Builder { // get nightly version var componentVersion utils.Version meta := specManager.NewMetadata() // full version componentVersion = utils.Version(version) if err := specManager.Metadata(clusterName, meta); err == nil { // get nightly version if version == utils.NightlyVersionAlias { componentVersion, _, err = environment.GlobalEnv().V1Repository().LatestNightlyVersion(inst.ComponentSource()) if err != nil { componentVersion = utils.Version(version) } } // dm cluster does not require a full nightly version if meta.GetTopology().Type() == spec.TopoTypeDM { componentVersion = utils.Version(version) } } b.tasks = append(b.tasks, &InitConfig{ specManager: specManager, clusterName: clusterName, clusterVersion: string(componentVersion), instance: inst, deployUser: deployUser, ignoreCheck: ignoreCheck, paths: paths, 
}) return b } // ScaleConfig generate temporary config on scaling func (b *Builder) ScaleConfig(clusterName, clusterVersion string, specManager *spec.SpecManager, topo spec.Topology, inst spec.Instance, deployUser string, paths meta.DirPaths) *Builder { b.tasks = append(b.tasks, &ScaleConfig{ specManager: specManager, clusterName: clusterName, clusterVersion: clusterVersion, base: topo, instance: inst, deployUser: deployUser, paths: paths, }) return b } // MonitoredConfig appends a CopyComponent task to the current task collection func (b *Builder) MonitoredConfig(name, comp, host string, globResCtl meta.ResourceControl, options *spec.MonitoredOptions, deployUser string, tlsEnabled bool, paths meta.DirPaths, systemdMode spec.SystemdMode) *Builder { b.tasks = append(b.tasks, &MonitoredConfig{ name: name, component: comp, host: host, globResCtl: globResCtl, options: options, deployUser: deployUser, tlsEnabled: tlsEnabled, paths: paths, systemdMode: systemdMode, }) return b } // SSHKeyGen appends a SSHKeyGen task to the current task collection func (b *Builder) SSHKeyGen(keypath string) *Builder { b.tasks = append(b.tasks, &SSHKeyGen{ keypath: keypath, }) return b } // SSHKeySet appends a SSHKeySet task to the current task collection func (b *Builder) SSHKeySet(privKeyPath, pubKeyPath string) *Builder { b.tasks = append(b.tasks, &SSHKeySet{ privateKeyPath: privKeyPath, publicKeyPath: pubKeyPath, }) return b } // EnvInit appends a EnvInit task to the current task collection func (b *Builder) EnvInit(host, deployUser string, userGroup string, skipCreateUser bool, sudo bool) *Builder { b.tasks = append(b.tasks, &EnvInit{ host: host, deployUser: deployUser, userGroup: userGroup, skipCreateUser: skipCreateUser, sudo: sudo, }) return b } // RotateSSH appends a RotateSSH task to the current task collection func (b *Builder) RotateSSH(host, deployUser, newPublicKeyPath string) *Builder { b.tasks = append(b.tasks, &RotateSSH{ host: host, deployUser: deployUser, 
newPublicKeyPath: newPublicKeyPath, }) return b } // ClusterOperate appends a cluster operation task. // All the UserSSH needed must be init first. func (b *Builder) ClusterOperate( spec *spec.Specification, op operator.Operation, options operator.Options, tlsCfg *tls.Config, ) *Builder { b.tasks = append(b.tasks, &ClusterOperate{ spec: spec, op: op, options: options, tlsCfg: tlsCfg, }) return b } // Mkdir appends a Mkdir task to the current task collection func (b *Builder) Mkdir(user, host string, sudo bool, dirs ...string) *Builder { b.tasks = append(b.tasks, &Mkdir{ user: user, host: host, dirs: dirs, sudo: sudo, }) return b } // Rmdir appends a Rmdir task to the current task collection func (b *Builder) Rmdir(host string, dirs ...string) *Builder { b.tasks = append(b.tasks, &Rmdir{ host: host, dirs: dirs, }) return b } // Shell command on cluster host func (b *Builder) Shell(host, command, cmdID string, sudo bool) *Builder { b.tasks = append(b.tasks, &Shell{ host: host, command: command, sudo: sudo, cmdID: cmdID, }) return b } // SystemCtl run systemctl on host func (b *Builder) SystemCtl(host, unit, action string, daemonReload, checkActive bool, scope string) *Builder { b.tasks = append(b.tasks, &SystemCtl{ host: host, unit: unit, action: action, daemonReload: daemonReload, checkactive: checkActive, scope: scope, }) return b } // Sysctl set a kernel parameter func (b *Builder) Sysctl(host, key, val string, sudo bool) *Builder { b.tasks = append(b.tasks, &Sysctl{ host: host, key: key, val: val, sudo: sudo, }) return b } // Limit set a system limit func (b *Builder) Limit(host, domain, limit, item, value string, sudo bool) *Builder { b.tasks = append(b.tasks, &Limit{ host: host, domain: domain, limit: limit, item: item, value: value, sudo: sudo, }) return b } // CheckSys checks system information of deploy server func (b *Builder) CheckSys(host, dir, checkType string, topo *spec.Specification, opt *operator.CheckOptions) *Builder { b.tasks = append(b.tasks, 
&CheckSys{ host: host, topo: topo, opt: opt, checkDir: dir, check: checkType, }) return b } // DeploySpark deployes spark as dependency of TiSpark func (b *Builder) DeploySpark(inst spec.Instance, sparkVersion, srcPath, deployDir string) *Builder { sparkSubPath := spec.ComponentSubDir(spec.ComponentSpark, sparkVersion) return b.CopyComponent( spec.ComponentSpark, inst.OS(), inst.Arch(), sparkVersion, srcPath, inst.GetManageHost(), deployDir, ).Shell( // spark is under a subdir, move it to deploy dir inst.GetManageHost(), fmt.Sprintf( "cp -rf %[1]s %[2]s/ && cp -rf %[3]s/* %[2]s/ && rm -rf %[1]s %[3]s", filepath.Join(deployDir, "bin", sparkSubPath), deployDir, filepath.Join(deployDir, sparkSubPath), ), "", false, // (not) sudo ).CopyComponent( inst.ComponentName(), inst.OS(), inst.Arch(), "", // use the latest stable version srcPath, inst.GetManageHost(), deployDir, ).Shell( // move tispark jar to correct path inst.GetManageHost(), fmt.Sprintf( "cp -f %[1]s/*.jar %[2]s/jars/ && rm -f %[1]s/*.jar", filepath.Join(deployDir, "bin"), deployDir, ), "", false, // (not) sudo ) } // TLSCert generates certificate for instance and transfers it to the server func (b *Builder) TLSCert(host, comp, role string, port int, ca *crypto.CertificateAuthority, paths meta.DirPaths) *Builder { b.tasks = append(b.tasks, &TLSCert{ host: host, comp: comp, role: role, port: port, ca: ca, paths: paths, }) return b } // Parallel appends a parallel task to the current task collection func (b *Builder) Parallel(ignoreError bool, tasks ...Task) *Builder { if len(tasks) > 0 { b.tasks = append(b.tasks, &Parallel{ignoreError: ignoreError, inner: tasks}) } return b } // Serial appends the tasks to the tail of queue func (b *Builder) Serial(tasks ...Task) *Builder { if len(tasks) > 0 { b.tasks = append(b.tasks, tasks...) } return b } // Build returns a task that contains all tasks appended by previous operation func (b *Builder) Build() Task { // Serial handles event internally. 
So the following 3 lines are commented out. // if len(b.tasks) == 1 { // return b.tasks[0] // } return &Serial{inner: b.tasks} } // Step appends a new StepDisplay task, which will print single line progress for inner tasks. func (b *Builder) Step(prefix string, inner Task, logger *logprinter.Logger) *Builder { b.Serial(newStepDisplay(prefix, inner, logger)) return b } // ParallelStep appends a new ParallelStepDisplay task, which will print multi line progress in parallel // for inner tasks. Inner tasks must be a StepDisplay task. func (b *Builder) ParallelStep(prefix string, ignoreError bool, tasks ...*StepDisplay) *Builder { b.tasks = append(b.tasks, newParallelStepDisplay(prefix, ignoreError, tasks...).SetLogger(b.Logger)) return b } // BuildAsStep returns a task that is wrapped by a StepDisplay. The task will print single line progress. func (b *Builder) BuildAsStep(prefix string) *StepDisplay { inner := b.Build() return newStepDisplay(prefix, inner, b.Logger) } tiup-1.16.3/pkg/cluster/task/check.go000066400000000000000000000214311505422223000173670ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "path/filepath" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/ctxt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/cluster/spec" ) // the check types var ( CheckTypeSystemInfo = "insight" CheckTypeSystemLimits = "limits" CheckTypeSystemConfig = "system" CheckTypePort = "port" CheckTypeService = "service" CheckTypePackage = "package" CheckTypePartitions = "partitions" CheckTypeFIO = "fio" CheckTypePermission = "permission" ChecktypeIsExist = "exist" CheckTypeTimeZone = "timezone" ) // place the check utilities are stored var ( CheckToolsPathDir = "/tmp/tiup" ) // CheckSys performs checks of system information type CheckSys struct { host string topo *spec.Specification opt *operator.CheckOptions check string // check type name checkDir string } func storeResults(ctx context.Context, host string, results []*operator.CheckResult) { rr := []any{} for _, r := range results { rr = append(rr, r) } ctxt.GetInner(ctx).SetCheckResults(host, rr) } // Execute implements the Task interface func (c *CheckSys) Execute(ctx context.Context) error { stdout, stderr, _ := ctxt.GetInner(ctx).GetOutputs(c.host) if len(stderr) > 0 && len(stdout) == 0 { return ErrNoOutput } sudo := true if c.topo.BaseTopo().GlobalOptions.SystemdMode == spec.UserMode { sudo = false } switch c.check { case CheckTypeSystemInfo: storeResults(ctx, c.host, operator.CheckSystemInfo(c.opt, stdout)) case CheckTypeSystemLimits: storeResults(ctx, c.host, operator.CheckSysLimits(c.opt, c.topo.GlobalOptions.User, stdout)) case CheckTypeSystemConfig: results := operator.CheckKernelParameters(c.opt, stdout) e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { return ErrNoExecutor } results = append( results, operator.CheckSELinux(ctx, e, sudo), operator.CheckTHP(ctx, e, sudo), ) storeResults(ctx, c.host, results) case CheckTypePort: storeResults(ctx, c.host, operator.CheckListeningPort(c.opt, c.host, c.topo, stdout)) case 
CheckTypeService: e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { return ErrNoExecutor } var results []*operator.CheckResult // check services results = append( results, operator.CheckServices(ctx, e, c.host, "irqbalance", false, spec.SystemdMode(string(c.topo.BaseTopo().GlobalOptions.SystemdMode))), // FIXME: set firewalld rules in deploy, and not disabling it anymore operator.CheckServices(ctx, e, c.host, "firewalld", true, spec.SystemdMode(string(c.topo.BaseTopo().GlobalOptions.SystemdMode))), ) storeResults(ctx, c.host, results) case CheckTypePackage: // check if a command present, and if a package installed e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { return ErrNoExecutor } var results []*operator.CheckResult // check if numactl is installed stdout, stderr, err := e.Execute(ctx, "numactl --show", false) if err != nil || len(stderr) > 0 { results = append(results, &operator.CheckResult{ Name: operator.CheckNameCommand, Err: fmt.Errorf("numactl not usable, %s", strings.Trim(string(stderr), "\n")), Msg: "numactl is not installed properly", }) } else { results = append(results, &operator.CheckResult{ Name: operator.CheckNameCommand, Msg: "numactl: " + strings.Split(string(stdout), "\n")[0], }) } // check if JRE is available for TiSpark results = append(results, operator.CheckJRE(ctx, e, c.host, c.topo)...) 
storeResults(ctx, c.host, results) case CheckTypePartitions: // check partition mount options for data_dir storeResults(ctx, c.host, operator.CheckPartitions(c.opt, c.host, c.topo, stdout)) case CheckTypeFIO: if !c.opt.EnableDisk || c.checkDir == "" { break } rr, rw, lat, err := c.runFIO(ctx) if err != nil { return err } storeResults(ctx, c.host, operator.CheckFIOResult(rr, rw, lat)) case CheckTypePermission: e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { return ErrNoExecutor } storeResults(ctx, c.host, operator.CheckDirPermission(ctx, e, c.topo.GlobalOptions.User, c.checkDir)) case ChecktypeIsExist: e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { return ErrNoExecutor } // check partition mount options for data_dir storeResults(ctx, c.host, operator.CheckDirIsExist(ctx, e, c.checkDir)) case CheckTypeTimeZone: storeResults(ctx, c.host, operator.CheckTimeZone(ctx, c.topo, c.host, stdout)) } return nil } // Rollback implements the Task interface func (c *CheckSys) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *CheckSys) String() string { return fmt.Sprintf("CheckSys: host=%s type=%s", c.host, c.check) } // runFIO performs FIO checks func (c *CheckSys) runFIO(ctx context.Context) (outRR []byte, outRW []byte, outLat []byte, err error) { e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { err = ErrNoExecutor return } checkDir := spec.Abs(c.topo.GlobalOptions.User, c.checkDir) testWd := filepath.Join(checkDir, "tiup-fio-test") fioBin := filepath.Join(CheckToolsPathDir, "bin", "fio") notExistDir := testWd for { parent := filepath.Dir(notExistDir) if len(parent) <= 1 { break } results := operator.CheckDirIsExist(ctx, e, parent) if len(results) > 0 { break } notExistDir = parent } var stderr []byte // rand read var ( fileRR = "fio_randread_test.txt" resRR = "fio_randread_result.json" ) cmdRR := strings.Join([]string{ fmt.Sprintf("mkdir -p %s && cd %s", testWd, testWd), 
fmt.Sprintf("rm -f %s %s", fileRR, resRR), // cleanup any legancy files strings.Join([]string{ fioBin, "-ioengine=psync", "-bs=32k", "-fdatasync=1", "-thread", "-rw=randread", "-name='fio randread test'", "-iodepth=4", "-runtime=60", "-numjobs=4", fmt.Sprintf("-filename=%s", fileRR), "-size=1G", "-group_reporting", "--output-format=json", fmt.Sprintf("--output=%s", resRR), "> /dev/null", // ignore output }, " "), fmt.Sprintf("cat %s", resRR), }, " && ") outRR, stderr, err = e.Execute(ctx, cmdRR, false, time.Second*600) if err != nil { return } if len(stderr) > 0 { err = fmt.Errorf("%s", stderr) return } // rand read write var ( fileRW = "fio_randread_write_test.txt" resRW = "fio_randread_write_test.json" ) cmdRW := strings.Join([]string{ fmt.Sprintf("mkdir -p %s && cd %s", testWd, testWd), fmt.Sprintf("rm -f %s %s", fileRW, resRW), // cleanup any legancy files strings.Join([]string{ fioBin, "-ioengine=psync", "-bs=32k", "-fdatasync=1", "-thread", "-rw=randrw", "-percentage_random=100,0", "-name='fio mixed randread and sequential write test'", "-iodepth=4", "-runtime=60", "-numjobs=4", fmt.Sprintf("-filename=%s", fileRW), "-size=1G", "-group_reporting", "--output-format=json", fmt.Sprintf("--output=%s", resRW), "> /dev/null", // ignore output }, " "), fmt.Sprintf("cat %s", resRW), }, " && ") outRW, stderr, err = e.Execute(ctx, cmdRW, false, time.Second*600) if err != nil { return } if len(stderr) > 0 { err = fmt.Errorf("%s", stderr) return } // rand read write var ( fileLat = "fio_randread_write_latency_test.txt" resLat = "fio_randread_write_latency_test.json" ) cmdLat := strings.Join([]string{ fmt.Sprintf("mkdir -p %s && cd %s", testWd, testWd), fmt.Sprintf("rm -f %s %s", fileLat, resLat), // cleanup any legancy files strings.Join([]string{ fioBin, "-ioengine=psync", "-bs=32k", "-fdatasync=1", "-thread", "-rw=randrw", "-percentage_random=100,0", "-name='fio mixed randread and sequential write test'", "-iodepth=1", "-runtime=60", "-numjobs=1", 
fmt.Sprintf("-filename=%s", fileLat), "-size=1G", "-group_reporting", "--output-format=json", fmt.Sprintf("--output=%s", resLat), "> /dev/null", // ignore output }, " "), fmt.Sprintf("cat %s", resLat), }, " && ") outLat, stderr, err = e.Execute(ctx, cmdLat, false, time.Second*600) if err != nil { return } if len(stderr) > 0 { err = fmt.Errorf("%s", stderr) return } // cleanup _, stderr, err = e.Execute( ctx, fmt.Sprintf("rm -rf %s", notExistDir), false, ) if err != nil { return } if len(stderr) > 0 { err = fmt.Errorf("%s", stderr) } return } tiup-1.16.3/pkg/cluster/task/copy_component.go000066400000000000000000000040031505422223000213420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/repository" ) // CopyComponent is used to copy all files related the specific version a component // to the target directory of path type CopyComponent struct { component string os string arch string version string host string srcPath string dstDir string } // Execute implements the Task interface func (c *CopyComponent) Execute(ctx context.Context) error { // If the version is not specified, the last stable one will be used if c.version == "" { env := environment.GlobalEnv() ver, _, err := env.V1Repository().WithOptions(repository.Options{ GOOS: c.os, GOARCH: c.arch, }).LatestStableVersion(c.component, false) if err != nil { return err } c.version = string(ver) } // Copy to remote server srcPath := c.srcPath if srcPath == "" { srcPath = spec.PackagePath(c.component, c.version, c.os, c.arch) } install := &InstallPackage{ srcPath: srcPath, host: c.host, dstDir: c.dstDir, } return install.Execute(ctx) } // Rollback implements the Task interface func (c *CopyComponent) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *CopyComponent) String() string { return fmt.Sprintf("CopyComponent: component=%s, version=%s, remote=%s:%s os=%s, arch=%s", c.component, c.version, c.host, c.dstDir, c.os, c.arch) } tiup-1.16.3/pkg/cluster/task/copy_file.go000066400000000000000000000030261505422223000202630ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // CopyFile will copy a local file to the target host type CopyFile struct { src string dst string remote string download bool limit int compress bool } // Execute implements the Task interface func (c *CopyFile) Execute(ctx context.Context) error { e, ok := ctxt.GetInner(ctx).GetExecutor(c.remote) if !ok { return ErrNoExecutor } err := e.Transfer(ctx, c.src, c.dst, c.download, c.limit, c.compress) if err != nil { return errors.Annotate(err, "failed to transfer file") } return nil } // Rollback implements the Task interface func (c *CopyFile) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *CopyFile) String() string { if c.download { return fmt.Sprintf("CopyFile: remote=%s:%s, local=%s", c.remote, c.src, c.dst) } return fmt.Sprintf("CopyFile: local=%s, remote=%s:%s", c.src, c.remote, c.dst) } tiup-1.16.3/pkg/cluster/task/download.go000066400000000000000000000040661505422223000201260ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" operator "github.com/pingcap/tiup/pkg/cluster/operation" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/repository" ) // Downloader is used to download the specific version of a component from // the repository, there is nothing to do if the specified version exists. type Downloader struct { component string os string arch string version string } // NewDownloader create a Downloader instance. func NewDownloader(component string, os string, arch string, version string) *Downloader { return &Downloader{ component: component, os: os, arch: arch, version: version, } } // Execute implements the Task interface func (d *Downloader) Execute(_ context.Context) error { // If the version is not specified, the last stable one will be used if d.version == "" { env := environment.GlobalEnv() ver, _, err := env.V1Repository().WithOptions(repository.Options{ GOOS: d.os, GOARCH: d.arch, }).LatestStableVersion(d.component, false) if err != nil { return err } d.version = string(ver) } return operator.Download(d.component, d.os, d.arch, d.version) } // Rollback implements the Task interface func (d *Downloader) Rollback(ctx context.Context) error { // We cannot delete the component because of some versions maybe exists before return nil } // String implements the fmt.Stringer interface func (d *Downloader) String() string { return fmt.Sprintf("Download: component=%s, version=%s, os=%s, arch=%s", d.component, d.version, d.os, d.arch) } tiup-1.16.3/pkg/cluster/task/env_init.go000066400000000000000000000063561505422223000201360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "os" "strings" "github.com/joomcode/errorx" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" "github.com/pingcap/tiup/pkg/cluster/module" ) var ( errNSEnvInit = errNS.NewSubNamespace("env_init") errEnvInitSubCommandFailed = errNSEnvInit.NewType("sub_command_failed") // ErrEnvInitFailed is ErrEnvInitFailed ErrEnvInitFailed = errNSEnvInit.NewType("failed") ) // EnvInit is used to initialize the remote environment, e.g: // 1. Generate SSH key // 2. ssh-copy-id type EnvInit struct { host string deployUser string userGroup string skipCreateUser bool sudo bool } // Execute implements the Task interface func (e *EnvInit) Execute(ctx context.Context) error { return e.exec(ctx) } func (e *EnvInit) exec(ctx context.Context) error { wrapError := func(err error) *errorx.Error { return ErrEnvInitFailed.Wrap(err, "Failed to initialize TiDB environment on remote host '%s'", e.host) } exec, found := ctxt.GetInner(ctx).GetExecutor(e.host) if !found { panic(ErrNoExecutor) } if !e.skipCreateUser { um := module.NewUserModule(module.UserModuleConfig{ Action: module.UserActionAdd, Name: e.deployUser, Group: e.userGroup, Sudoer: true, }) _, _, errx := um.Execute(ctx, exec) if errx != nil { return wrapError(errx) } } pubKey, err := os.ReadFile(ctxt.GetInner(ctx).PublicKeyPath) if err != nil { return wrapError(err) } // Authorize var cmd string if e.sudo { cmd = `su - ` + e.deployUser + ` -c 'mkdir -p ~/.ssh && chmod 700 ~/.ssh'` } else { cmd = `mkdir -p ~/.ssh && chmod 700 ~/.ssh` } _, _, err = exec.Execute(ctx, cmd, e.sudo) if err != nil { return 
wrapError(errEnvInitSubCommandFailed. Wrap(err, "Failed to create '~/.ssh' directory for user '%s'", e.deployUser)) } pk := strings.TrimSpace(string(pubKey)) sshAuthorizedKeys := executor.FindSSHAuthorizedKeysFile(ctx, exec) if e.sudo { cmd = fmt.Sprintf(`su - %[1]s -c 'grep $(echo %[2]s) %[3]s || echo %[2]s >> %[3]s && chmod 600 %[3]s'`, e.deployUser, pk, sshAuthorizedKeys) } else { cmd = fmt.Sprintf(`grep $(echo %[1]s) %[2]s || echo %[1]s >> %[2]s && chmod 600 %[2]s`, pk, sshAuthorizedKeys) } _, _, err = exec.Execute(ctx, cmd, e.sudo) if err != nil { return wrapError(errEnvInitSubCommandFailed. Wrap(err, "Failed to write public keys to '%s' for user '%s'", sshAuthorizedKeys, e.deployUser)) } return nil } // Rollback implements the Task interface func (e *EnvInit) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (e *EnvInit) String() string { return fmt.Sprintf("EnvInit: user=%s, host=%s", e.deployUser, e.host) } tiup-1.16.3/pkg/cluster/task/func.go000066400000000000000000000021521505422223000172440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import "context" // Func wrap a closure. 
type Func struct { name string fn func(ctx context.Context) error } // NewFunc create a Func task func NewFunc(name string, fn func(ctx context.Context) error) *Func { return &Func{ name: name, fn: fn, } } // Execute implements the Task interface func (m *Func) Execute(ctx context.Context) error { return m.fn(ctx) } // Rollback implements the Task interface func (m *Func) Rollback(_ context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (m *Func) String() string { return m.name } tiup-1.16.3/pkg/cluster/task/init_config.go000066400000000000000000000042451505422223000206060ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" ) // InitConfig is used to copy all configurations to the target directory of path type InitConfig struct { specManager *spec.SpecManager clusterName string clusterVersion string instance spec.Instance deployUser string ignoreCheck bool paths meta.DirPaths } // Execute implements the Task interface func (c *InitConfig) Execute(ctx context.Context) error { // Copy to remote server exec, found := ctxt.GetInner(ctx).GetExecutor(c.instance.GetManageHost()) if !found { return ErrNoExecutor } if err := utils.MkdirAll(c.paths.Cache, 0755); err != nil { return errors.Annotatef(err, "create cache directory failed: %s", c.paths.Cache) } err := c.instance.InitConfig(ctx, exec, c.clusterName, c.clusterVersion, c.deployUser, c.paths) if err != nil { if c.ignoreCheck && errors.Cause(err) == spec.ErrorCheckConfig { return nil } return errors.Annotatef(err, "init config failed: %s:%d", c.instance.GetManageHost(), c.instance.GetPort()) } return nil } // Rollback implements the Task interface func (c *InitConfig) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *InitConfig) String() string { return fmt.Sprintf("InitConfig: cluster=%s, user=%s, host=%s, path=%s, %s", c.clusterName, c.deployUser, c.instance.GetManageHost(), c.specManager.Path(c.clusterName, spec.TempConfigPath, c.instance.ServiceName()), c.paths) } tiup-1.16.3/pkg/cluster/task/init_config_test.go000066400000000000000000000046231505422223000216450ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "testing" "time" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/spec" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils/mock" "github.com/pingcap/errors" "github.com/stretchr/testify/require" ) type fakeExecutor struct{} func (e *fakeExecutor) Execute(ctx context.Context, cmd string, sudo bool, timeout ...time.Duration) (stdout []byte, stderr []byte, err error) { return []byte{}, []byte{}, nil } func (e *fakeExecutor) Transfer(ctx context.Context, src, dst string, download bool, limit int, compress bool) error { return nil } type fakeInstance struct { hasConfigError bool *spec.TiDBInstance } func (i *fakeInstance) InitConfig( ctx context.Context, e ctxt.Executor, clusterName string, clusterVersion string, deployUser string, paths meta.DirPaths, ) error { if i.hasConfigError { return errors.Annotate(spec.ErrorCheckConfig, "test error") } return nil } func (i *fakeInstance) GetHost() string { return "1.1.1.1" } func (i *fakeInstance) GetPort() int { return 4000 } func (i *fakeInstance) GetManageHost() string { return "1.1.1.1" } func TestCheckConfig(t *testing.T) { ctx := ctxt.New(context.Background(), 0, logprinter.NewLogger("")) mf := mock.With("FakeExecutor", &fakeExecutor{}) defer mf() initCfg := &InitConfig{ clusterName: "test-cluster-name", clusterVersion: "v6.0.0", paths: meta.DirPaths{ Cache: "/tmp", }, } tests := [][]bool{ {false, false, false}, // hasConfigError, ignoreConfigError, expectError {true, false, true}, {false, true, false}, {true, true, false}, } for _, test := range tests 
{ initCfg.instance = &fakeInstance{test[0], nil} initCfg.ignoreCheck = test[1] if test[2] { require.Error(t, initCfg.Execute(ctx)) } else { require.NoError(t, initCfg.Execute(ctx)) } } } tiup-1.16.3/pkg/cluster/task/install_package.go000066400000000000000000000040461505422223000214360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "bytes" "context" "fmt" "path" "path/filepath" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // InstallPackage is used to copy all files related the specific version a component // to the target directory of path type InstallPackage struct { srcPath string host string dstDir string } // Execute implements the Task interface func (c *InstallPackage) Execute(ctx context.Context) error { // Install package to remote server exec, found := ctxt.GetInner(ctx).GetExecutor(c.host) if !found { return ErrNoExecutor } dstDir := filepath.Join(c.dstDir, "bin") dstPath := filepath.Join(dstDir, path.Base(c.srcPath)) err := exec.Transfer(ctx, c.srcPath, dstPath, false, 0, false) if err != nil { return errors.Annotatef(err, "failed to scp %s to %s:%s", c.srcPath, c.host, dstPath) } cmd := fmt.Sprintf(`tar --no-same-owner -zxf %s -C %s && rm %s`, dstPath, dstDir, dstPath) _, stderr, err := exec.Execute(ctx, cmd, false) if err != nil { if bytes.Contains(stderr, []byte("command not found")) { return errors.Errorf("tar command was not found on %s, please install it", c.host) } return errors.Annotatef(err, "stderr: %s", 
string(stderr)) } return nil } // Rollback implements the Task interface func (c *InstallPackage) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *InstallPackage) String() string { return fmt.Sprintf("InstallPackage: srcPath=%s, remote=%s:%s", c.srcPath, c.host, c.dstDir) } tiup-1.16.3/pkg/cluster/task/limits.go000066400000000000000000000034731505422223000176210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) var ( limitsFilePath = "/etc/security/limits.conf" ) // Limit set a system limit on host type Limit struct { host string domain string // user or group limit string // limit type item string value string sudo bool } // Execute implements the Task interface func (l *Limit) Execute(ctx context.Context) error { e, ok := ctxt.GetInner(ctx).GetExecutor(l.host) if !ok { return ErrNoExecutor } cmd := strings.Join([]string{ fmt.Sprintf("cp %s{,.bak} 2>/dev/null", limitsFilePath), fmt.Sprintf("sed -i '/%s\\s*%s\\s*%s/d' %s 2>/dev/null", l.domain, l.limit, l.item, limitsFilePath), fmt.Sprintf("echo '%s %s %s %s' >> %s", l.domain, l.limit, l.item, l.value, limitsFilePath), }, " && ") stdout, stderr, err := e.Execute(ctx, cmd, l.sudo) ctxt.GetInner(ctx).SetOutputs(l.host, stdout, stderr) if err != nil { return errors.Trace(err) } return nil } // Rollback implements the Task interface func (l 
*Limit) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (l *Limit) String() string { return fmt.Sprintf("Limit: host=%s %s %s %s %s", l.host, l.domain, l.limit, l.item, l.value) } tiup-1.16.3/pkg/cluster/task/mkdir.go000066400000000000000000000044171505422223000174250ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // Mkdir is used to create directory on the target host type Mkdir struct { user string host string dirs []string sudo bool } // Execute implements the Task interface func (m *Mkdir) Execute(ctx context.Context) error { exec, found := ctxt.GetInner(ctx).GetExecutor(m.host) if !found { panic(ErrNoExecutor) } for _, dir := range m.dirs { if !strings.HasPrefix(dir, "/") { return fmt.Errorf("dir is a relative path: %s", dir) } if strings.Contains(dir, ",") { return fmt.Errorf("dir name contains invalid characters: %v", dir) } xs := strings.Split(dir, "/") // Create directories recursively // The directory /a/b/c will be flatten to: // test -d /a || (mkdir /a && chown tidb:tidb /a) // test -d /a/b || (mkdir /a/b && chown tidb:tidb /a/b) // test -d /a/b/c || (mkdir /a/b/c && chown tidb:tidb /a/b/c) for i := range xs { if xs[i] == "" { continue } cmd := "" if m.sudo { cmd = fmt.Sprintf( `test -d %[1]s || (mkdir -p %[1]s && chown %[2]s:$(id -g -n %[2]s) %[1]s)`, strings.Join(xs[:i+1], 
"/"), m.user, ) } else { cmd = fmt.Sprintf( `test -d %[1]s || (mkdir -p %[1]s)`, strings.Join(xs[:i+1], "/"), ) } _, _, err := exec.Execute(ctx, cmd, m.sudo) // use root to create the dir if err != nil { return errors.Trace(err) } } } return nil } // Rollback implements the Task interface func (m *Mkdir) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (m *Mkdir) String() string { return fmt.Sprintf("Mkdir: host=%s, directories='%s'", m.host, strings.Join(m.dirs, "','")) } tiup-1.16.3/pkg/cluster/task/monitored_config.go000066400000000000000000000145051505422223000216430ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "path/filepath" "github.com/google/uuid" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/cluster/template" "github.com/pingcap/tiup/pkg/cluster/template/config" "github.com/pingcap/tiup/pkg/cluster/template/scripts" system "github.com/pingcap/tiup/pkg/cluster/template/systemd" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" ) // MonitoredConfig is used to generate the monitor node configuration type MonitoredConfig struct { name string component string host string globResCtl meta.ResourceControl options *spec.MonitoredOptions deployUser string tlsEnabled bool paths meta.DirPaths systemdMode spec.SystemdMode } // Execute implements the Task interface func (m *MonitoredConfig) Execute(ctx context.Context) error { ports := map[string]int{ spec.ComponentNodeExporter: m.options.NodeExporterPort, spec.ComponentBlackboxExporter: m.options.BlackboxExporterPort, } // Copy to remote server exec, found := ctxt.GetInner(ctx).GetExecutor(m.host) if !found { return ErrNoExecutor } if err := utils.MkdirAll(m.paths.Cache, 0755); err != nil { return err } if err := m.syncMonitoredSystemConfig(ctx, exec, m.component, ports[m.component], m.systemdMode); err != nil { return err } var cfg template.ConfigGenerator switch m.component { case spec.ComponentNodeExporter: if err := m.syncBlackboxConfig(ctx, exec, config.NewBlackboxConfig(m.paths.Deploy, m.tlsEnabled)); err != nil { return err } cfg = scripts. NewNodeExporterScript(m.paths.Deploy, m.paths.Log). WithPort(uint64(m.options.NodeExporterPort)). WithNumaNode(m.options.NumaNode) case spec.ComponentBlackboxExporter: cfg = scripts. NewBlackboxExporterScript(m.paths.Deploy, m.paths.Log). 
WithPort(uint64(m.options.BlackboxExporterPort)) default: return fmt.Errorf("unknown monitored component %s", m.component) } return m.syncMonitoredScript(ctx, exec, m.component, cfg) } func (m *MonitoredConfig) syncMonitoredSystemConfig(ctx context.Context, exec ctxt.Executor, comp string, port int, systemdMode spec.SystemdMode) (err error) { sysCfg := filepath.Join(m.paths.Cache, fmt.Sprintf("%s-%s-%d.service", comp, m.host, port)) // insert checkpoint point := checkpoint.Acquire(ctx, spec.CopyConfigFile, map[string]any{"config-file": sysCfg}) defer func() { point.Release(err, zap.String("config-file", sysCfg)) }() if point.Hit() != nil { return nil } if len(systemdMode) == 0 { systemdMode = spec.SystemMode } resource := spec.MergeResourceControl(m.globResCtl, m.options.ResourceControl) systemCfg := system.NewConfig(comp, m.deployUser, m.paths.Deploy). WithMemoryLimit(resource.MemoryLimit). WithCPUQuota(resource.CPUQuota). WithLimitCORE(resource.LimitCORE). WithTimeoutStartSec(resource.TimeoutStartSec). WithTimeoutStopSec(resource.TimeoutStopSec). WithIOReadBandwidthMax(resource.IOReadBandwidthMax). WithIOWriteBandwidthMax(resource.IOWriteBandwidthMax). WithSystemdMode(string(systemdMode)) // blackbox_exporter needs cap_net_raw to send ICMP ping packets if comp == spec.ComponentBlackboxExporter { systemCfg.GrantCapNetRaw = true } if err := systemCfg.ConfigToFile(sysCfg); err != nil { return err } tgt := filepath.Join("/tmp", comp+"_"+uuid.New().String()+".service") if err := exec.Transfer(ctx, sysCfg, tgt, false, 0, false); err != nil { return err } systemdDir := "/etc/systemd/system/" sudo := true if systemdMode == spec.UserMode { systemdDir = "~/.config/systemd/user/" sudo = false } if outp, errp, err := exec.Execute(ctx, fmt.Sprintf("mv %s %s%s-%d.service", tgt, systemdDir, comp, port), sudo); err != nil { if len(outp) > 0 { fmt.Println(string(outp)) } if len(errp) > 0 { ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger). 
Errorf("%s", string(errp)) } return err } // restorecon restores SELinux Contexts // Check with: ls -lZ /path/to/file // If the context is wrong systemctl will complain about a missing unit file // Note that we won't check for errors here because: // - We don't support SELinux in Enforcing mode // - restorecon might not be available (Ubuntu doesn't install SELinux tools by default) cmd := fmt.Sprintf("restorecon %s%s-%d.service", systemdDir, comp, port) exec.Execute(ctx, cmd, sudo) //nolint return nil } func (m *MonitoredConfig) syncMonitoredScript(ctx context.Context, exec ctxt.Executor, comp string, cfg template.ConfigGenerator) error { fp := filepath.Join(m.paths.Cache, fmt.Sprintf("run_%s_%s.sh", comp, m.host)) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(m.paths.Deploy, "scripts", fmt.Sprintf("run_%s.sh", comp)) if err := exec.Transfer(ctx, fp, dst, false, 0, false); err != nil { return err } if _, _, err := exec.Execute(ctx, "chmod +x "+dst, false); err != nil { return err } return nil } func (m *MonitoredConfig) syncBlackboxConfig(ctx context.Context, exec ctxt.Executor, cfg template.ConfigGenerator) error { fp := filepath.Join(m.paths.Cache, fmt.Sprintf("blackbox_%s.yaml", m.host)) if err := cfg.ConfigToFile(fp); err != nil { return err } dst := filepath.Join(m.paths.Deploy, "conf", "blackbox.yml") return exec.Transfer(ctx, fp, dst, false, 0, false) } // Rollback implements the Task interface func (m *MonitoredConfig) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (m *MonitoredConfig) String() string { return fmt.Sprintf("MonitoredConfig: cluster=%s, user=%s, node_exporter_port=%d, blackbox_exporter_port=%d, %v", m.name, m.deployUser, m.options.NodeExporterPort, m.options.BlackboxExporterPort, m.paths) } tiup-1.16.3/pkg/cluster/task/rmdir.go000066400000000000000000000026061505422223000174320ustar00rootroot00000000000000// Copyright 2020 PingCAP, 
Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // Rmdir is used to delete directory on the target host type Rmdir struct { host string dirs []string } // Execute implements the Task interface func (r *Rmdir) Execute(ctx context.Context) error { exec, found := ctxt.GetInner(ctx).GetExecutor(r.host) if !found { return ErrNoExecutor } cmd := fmt.Sprintf(`rm -rf %s`, strings.Join(r.dirs, " ")) _, _, err := exec.Execute(ctx, cmd, false) if err != nil { return errors.Trace(err) } return nil } // Rollback implements the Task interface func (r *Rmdir) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (r *Rmdir) String() string { return fmt.Sprintf("Rmdir: host=%s, directories='%s'", r.host, strings.Join(r.dirs, "','")) } tiup-1.16.3/pkg/cluster/task/rotate_ssh.go000066400000000000000000000047461505422223000204770ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "os" "strings" "github.com/joomcode/errorx" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" ) // RotateSSH is used to rotate ssh key, e.g: // 1. enable new public key // 2. revoke old public key type RotateSSH struct { host string deployUser string newPublicKeyPath string } // Execute implements the Task interface func (e *RotateSSH) Execute(ctx context.Context) error { return e.exec(ctx) } func (e *RotateSSH) exec(ctx context.Context) error { wrapError := func(err error) *errorx.Error { return ErrEnvInitFailed.Wrap(err, "Failed to Rotate ssh public key on remote host '%s'", e.host) } exec, found := ctxt.GetInner(ctx).GetExecutor(e.host) if !found { panic(ErrNoExecutor) } pubKey, err := os.ReadFile(ctxt.GetInner(ctx).PublicKeyPath) if err != nil { return wrapError(err) } newPubKey, err := os.ReadFile(e.newPublicKeyPath) if err != nil { return wrapError(err) } sshAuthorizedKeys := executor.FindSSHAuthorizedKeysFile(ctx, exec) // enable new key cmd := fmt.Sprintf(`echo %s >> %s`, strings.TrimSpace(string(newPubKey)), sshAuthorizedKeys) _, _, err = exec.Execute(ctx, cmd, false) if err != nil { return wrapError(errEnvInitSubCommandFailed. Wrap(err, "Failed to write new public key to '%s' for user '%s'", sshAuthorizedKeys, e.deployUser)) } // Revoke old key cmd = fmt.Sprintf(`sed -i '\|%[1]s|d' %[2]s`, strings.TrimSpace(string(pubKey)), sshAuthorizedKeys) _, _, err = exec.Execute(ctx, cmd, false) if err != nil { return wrapError(errEnvInitSubCommandFailed. 
Wrap(err, "Failed to revoke old key for user '%s'", e.deployUser)) } return nil } // Rollback implements the Task interface func (e *RotateSSH) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (e *RotateSSH) String() string { return fmt.Sprintf("RotateSSH: user=%s, host=%s", e.deployUser, e.host) } tiup-1.16.3/pkg/cluster/task/scale_config.go000066400000000000000000000036061505422223000207320ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" ) // ScaleConfig is used to copy all configurations to the target directory of path type ScaleConfig struct { specManager *spec.SpecManager clusterName string clusterVersion string instance spec.Instance base spec.Topology deployUser string paths meta.DirPaths } // Execute implements the Task interface func (c *ScaleConfig) Execute(ctx context.Context) error { // Copy to remote server exec, found := ctxt.GetInner(ctx).GetExecutor(c.instance.GetManageHost()) if !found { return ErrNoExecutor } c.paths.Cache = c.specManager.Path(c.clusterName, spec.TempConfigPath) if err := utils.MkdirAll(c.paths.Cache, 0755); err != nil { return err } return c.instance.ScaleConfig(ctx, exec, c.base, c.clusterName, c.clusterVersion, c.deployUser, c.paths) } // Rollback implements the Task interface func (c *ScaleConfig) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *ScaleConfig) String() string { return fmt.Sprintf("ScaleConfig: cluster=%s, user=%s, host=%s, service=%s, %s", c.clusterName, c.deployUser, c.instance.GetManageHost(), c.instance.ServiceName(), c.paths) } tiup-1.16.3/pkg/cluster/task/shell.go000066400000000000000000000032431505422223000174220ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" logprinter "github.com/pingcap/tiup/pkg/logger/printer" ) // Shell is used to create directory on the target host type Shell struct { host string command string sudo bool cmdID string } // Execute implements the Task interface func (m *Shell) Execute(ctx context.Context) error { exec, found := ctxt.GetInner(ctx).GetExecutor(m.host) if !found { return ErrNoExecutor } ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger). Infof("Run command on %s(sudo:%v): %s", m.host, m.sudo, m.command) stdout, stderr, err := exec.Execute(ctx, m.command, m.sudo) outputID := m.host if m.cmdID != "" { outputID = m.cmdID } ctxt.GetInner(ctx).SetOutputs(outputID, stdout, stderr) if err != nil { return errors.Trace(err) } return nil } // Rollback implements the Task interface func (m *Shell) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (m *Shell) String() string { return fmt.Sprintf("Shell: host=%s, sudo=%v, command=`%s`", m.host, m.sudo, m.command) } tiup-1.16.3/pkg/cluster/task/ssh.go000066400000000000000000000120071505422223000171060ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "time" "github.com/joomcode/errorx" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/executor" ) var ( errNS = errorx.NewNamespace("task") ) // RootSSH is used to establish a SSH connection to the target host with specific key type RootSSH struct { host string // hostname of the SSH server port int // port of the SSH server user string // username to login to the SSH server password string // password of the user keyFile string // path to the private key file passphrase string // passphrase of the private key file timeout uint64 // timeout in seconds when connecting via SSH exeTimeout uint64 // timeout in seconds waiting command to finish proxyHost string // hostname of the proxy SSH server proxyPort int // port of the proxy SSH server proxyUser string // username to login to the proxy SSH server proxyPassword string // password of the proxy user proxyKeyFile string // path to the private key file proxyPassphrase string // passphrase of the private key file proxyTimeout uint64 // timeout in seconds when connecting via SSH sshType executor.SSHType // the type of SSH channel sudo bool } // Execute implements the Task interface func (s *RootSSH) Execute(ctx context.Context) error { sc := executor.SSHConfig{ Host: s.host, Port: s.port, User: s.user, Password: s.password, KeyFile: s.keyFile, Passphrase: s.passphrase, Timeout: time.Second * time.Duration(s.timeout), ExeTimeout: time.Second * time.Duration(s.exeTimeout), } if len(s.proxyHost) > 0 { sc.Proxy = &executor.SSHConfig{ Host: s.proxyHost, Port: s.proxyPort, User: s.proxyUser, Password: s.proxyPassword, KeyFile: s.proxyKeyFile, Passphrase: s.proxyPassphrase, Timeout: time.Second * time.Duration(s.proxyTimeout), } } e, err := executor.New(s.sshType, s.sudo, sc) if err != nil { return err } ctxt.GetInner(ctx).SetExecutor(s.host, e) return nil } // Rollback implements the Task interface func (s *RootSSH) Rollback(ctx context.Context) error { 
ctxt.GetInner(ctx).SetExecutor(s.host, nil) return nil } // String implements the fmt.Stringer interface func (s RootSSH) String() string { if len(s.keyFile) > 0 { return fmt.Sprintf("RootSSH: user=%s, host=%s, port=%d, key=%s", s.user, s.host, s.port, s.keyFile) } return fmt.Sprintf("RootSSH: user=%s, host=%s, port=%d", s.user, s.host, s.port) } // UserSSH is used to establish an SSH connection to the target host with generated key type UserSSH struct { host string port int deployUser string timeout uint64 exeTimeout uint64 // timeout in seconds waiting command to finish proxyHost string // hostname of the proxy SSH server proxyPort int // port of the proxy SSH server proxyUser string // username to login to the proxy SSH server proxyPassword string // password of the proxy user proxyKeyFile string // path to the private key file proxyPassphrase string // passphrase of the private key file proxyTimeout uint64 // timeout in seconds when connecting via SSH sshType executor.SSHType } // Execute implements the Task interface func (s *UserSSH) Execute(ctx context.Context) error { sc := executor.SSHConfig{ Host: s.host, Port: s.port, KeyFile: ctxt.GetInner(ctx).PrivateKeyPath, User: s.deployUser, Timeout: time.Second * time.Duration(s.timeout), ExeTimeout: time.Second * time.Duration(s.exeTimeout), } if len(s.proxyHost) > 0 { sc.Proxy = &executor.SSHConfig{ Host: s.proxyHost, Port: s.proxyPort, User: s.proxyUser, Password: s.proxyPassword, KeyFile: s.proxyKeyFile, Passphrase: s.proxyPassphrase, Timeout: time.Second * time.Duration(s.proxyTimeout), } } e, err := executor.New(s.sshType, false, sc) if err != nil { return err } ctxt.GetInner(ctx).SetExecutor(s.host, e) return nil } // Rollback implements the Task interface func (s *UserSSH) Rollback(ctx context.Context) error { ctxt.GetInner(ctx).SetExecutor(s.host, nil) return nil } // String implements the fmt.Stringer interface func (s UserSSH) String() string { return fmt.Sprintf("UserSSH: user=%s, host=%s", 
s.deployUser, s.host) } tiup-1.16.3/pkg/cluster/task/ssh_keygen.go000066400000000000000000000074411505422223000204560ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "crypto/rsa" "crypto/x509" "encoding/pem" "fmt" "os" "path/filepath" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/crypto/rand" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/crypto/ssh" ) // SSHKeyGen is used to generate SSH key type SSHKeyGen struct { keypath string } // Execute implements the Task interface func (s *SSHKeyGen) Execute(ctx context.Context) error { ctxt.GetInner(ctx).Ev.PublishTaskProgress(s, "Generate SSH keys") savePrivateFileTo := s.keypath savePublicFileTo := s.keypath + ".pub" // Skip ssh key generate if utils.IsExist(savePrivateFileTo) && utils.IsExist(savePublicFileTo) { ctxt.GetInner(ctx).PublicKeyPath = savePublicFileTo ctxt.GetInner(ctx).PrivateKeyPath = savePrivateFileTo return nil } bitSize := 4096 ctxt.GetInner(ctx).Ev.PublishTaskProgress(s, "Generate private key") privateKey, err := s.generatePrivateKey(bitSize) if err != nil { return errors.Trace(err) } ctxt.GetInner(ctx).Ev.PublishTaskProgress(s, "Generate public key") publicKeyBytes, err := s.generatePublicKey(&privateKey.PublicKey) if err != nil { return errors.Trace(err) } privateKeyBytes := s.encodePrivateKeyToPEM(privateKey) ctxt.GetInner(ctx).Ev.PublishTaskProgress(s, "Persist keys") err = s.writeKeyToFile(privateKeyBytes, savePrivateFileTo) if 
err != nil { return errors.Trace(err) } err = s.writeKeyToFile(publicKeyBytes, savePublicFileTo) if err != nil { return errors.Trace(err) } ctxt.GetInner(ctx).PublicKeyPath = savePublicFileTo ctxt.GetInner(ctx).PrivateKeyPath = savePrivateFileTo return nil } // generatePrivateKey creates a RSA Private Key of specified byte size func (s *SSHKeyGen) generatePrivateKey(bitSize int) (*rsa.PrivateKey, error) { // Private Key generation privateKey, err := rsa.GenerateKey(rand.Reader, bitSize) if err != nil { return nil, err } // Validate Private Key err = privateKey.Validate() if err != nil { return nil, err } return privateKey, nil } // encodePrivateKeyToPEM encodes Private Key from RSA to PEM format func (s *SSHKeyGen) encodePrivateKeyToPEM(privateKey *rsa.PrivateKey) []byte { // Get ASN.1 DER format privDER := x509.MarshalPKCS1PrivateKey(privateKey) // pem.Block privBlock := pem.Block{ Type: "RSA PRIVATE KEY", Headers: nil, Bytes: privDER, } // Private key in PEM format return pem.EncodeToMemory(&privBlock) } // generatePublicKey take a rsa.PublicKey and return bytes suitable for writing to .pub file // returns in the format "ssh-rsa ..." 
func (s *SSHKeyGen) generatePublicKey(privatekey *rsa.PublicKey) ([]byte, error) { publicRsaKey, err := ssh.NewPublicKey(privatekey) if err != nil { return nil, err } return ssh.MarshalAuthorizedKey(publicRsaKey), nil } // writePemToFile writes keys to a file func (s *SSHKeyGen) writeKeyToFile(keyBytes []byte, saveFileTo string) error { if err := os.MkdirAll(filepath.Dir(saveFileTo), 0700); err != nil { return err } return os.WriteFile(saveFileTo, keyBytes, 0600) } // Rollback implements the Task interface func (s *SSHKeyGen) Rollback(ctx context.Context) error { return os.Remove(s.keypath) } // String implements the fmt.Stringer interface func (s *SSHKeyGen) String() string { return fmt.Sprintf("SSHKeyGen: path=%s", s.keypath) } tiup-1.16.3/pkg/cluster/task/ssh_keyset.go000066400000000000000000000024731505422223000205000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) // SSHKeySet is used to set the Context private/public key path type SSHKeySet struct { privateKeyPath string publicKeyPath string } // Execute implements the Task interface func (s *SSHKeySet) Execute(ctx context.Context) error { ctxt.GetInner(ctx).PublicKeyPath = s.publicKeyPath ctxt.GetInner(ctx).PrivateKeyPath = s.privateKeyPath return nil } // Rollback implements the Task interface func (s *SSHKeySet) Rollback(ctx context.Context) error { ctxt.GetInner(ctx).PublicKeyPath = "" ctxt.GetInner(ctx).PrivateKeyPath = "" return nil } // String implements the fmt.Stringer interface func (s *SSHKeySet) String() string { return fmt.Sprintf("SSHKeySet: privateKey=%s, publicKey=%s", s.privateKeyPath, s.publicKeyPath) } tiup-1.16.3/pkg/cluster/task/step.go000066400000000000000000000147351505422223000172760ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "encoding/json" "fmt" "strings" "github.com/pingcap/tiup/pkg/cluster/ctxt" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/tui/progress" ) // StepDisplay is a task that will display a progress bar for inner task. 
type StepDisplay struct { hidden bool inner Task prefix string children map[Task]struct{} Logger *logprinter.Logger progressBar progress.Bar } func addChildren(m map[Task]struct{}, task Task) { if _, exists := m[task]; exists { return } m[task] = struct{}{} if t, ok := task.(*Serial); ok { t.hideDetailDisplay = true for _, tx := range t.inner { if _, exists := m[tx]; !exists { addChildren(m, tx) } } } else if t, ok := task.(*Parallel); ok { t.hideDetailDisplay = true for _, tx := range t.inner { if _, exists := m[tx]; !exists { addChildren(m, tx) } } } } func newStepDisplay(prefix string, inner Task, logger *logprinter.Logger) *StepDisplay { children := make(map[Task]struct{}) addChildren(children, inner) return &StepDisplay{ inner: inner, prefix: prefix, children: children, Logger: logger, progressBar: progress.NewSingleBar(prefix), } } // SetHidden set step hidden or not. func (s *StepDisplay) SetHidden(h bool) *StepDisplay { s.hidden = h return s } // SetLogger set the logger of step func (s *StepDisplay) SetLogger(logger *logprinter.Logger) *StepDisplay { s.Logger = logger return s } func (s *StepDisplay) resetAsMultiBarItem(b *progress.MultiBar) { s.progressBar = b.AddBar(s.prefix) } // Execute implements the Task interface func (s *StepDisplay) Execute(ctx context.Context) error { if s.hidden { ctxt.GetInner(ctx).Ev.Subscribe(ctxt.EventTaskBegin, s.handleTaskBegin) ctxt.GetInner(ctx).Ev.Subscribe(ctxt.EventTaskProgress, s.handleTaskProgress) err := s.inner.Execute(ctx) ctxt.GetInner(ctx).Ev.Unsubscribe(ctxt.EventTaskProgress, s.handleTaskProgress) ctxt.GetInner(ctx).Ev.Unsubscribe(ctxt.EventTaskBegin, s.handleTaskBegin) return err } switch s.Logger.GetDisplayMode() { case logprinter.DisplayModeJSON, logprinter.DisplayModePlain: // do nothing default: if singleBar, ok := s.progressBar.(*progress.SingleBar); ok { singleBar.StartRenderLoop() defer singleBar.StopRenderLoop() } } ctxt.GetInner(ctx).Ev.Subscribe(ctxt.EventTaskBegin, s.handleTaskBegin) 
ctxt.GetInner(ctx).Ev.Subscribe(ctxt.EventTaskProgress, s.handleTaskProgress) err := s.inner.Execute(ctx) ctxt.GetInner(ctx).Ev.Unsubscribe(ctxt.EventTaskProgress, s.handleTaskProgress) ctxt.GetInner(ctx).Ev.Unsubscribe(ctxt.EventTaskBegin, s.handleTaskBegin) var dp *progress.DisplayProps if err != nil { dp = &progress.DisplayProps{ Prefix: s.prefix, Mode: progress.ModeError, } } else { dp = &progress.DisplayProps{ Prefix: s.prefix, Mode: progress.ModeDone, } } switch s.Logger.GetDisplayMode() { case logprinter.DisplayModeJSON: _ = printDpJSON(dp) case logprinter.DisplayModePlain: printDpPlain(s.Logger, dp) default: s.progressBar.UpdateDisplay(dp) } return err } // Rollback implements the Task interface func (s *StepDisplay) Rollback(ctx context.Context) error { return s.inner.Rollback(ctx) } // String implements the fmt.Stringer interface func (s *StepDisplay) String() string { return s.inner.String() } func (s *StepDisplay) handleTaskBegin(task Task) { if _, ok := s.children[task]; !ok { return } dp := &progress.DisplayProps{ Prefix: s.prefix, Suffix: strings.Split(task.String(), "\n")[0], } switch s.Logger.GetDisplayMode() { case logprinter.DisplayModeJSON: _ = printDpJSON(dp) case logprinter.DisplayModePlain: printDpPlain(s.Logger, dp) default: s.progressBar.UpdateDisplay(dp) } } func (s *StepDisplay) handleTaskProgress(task Task, p string) { if _, ok := s.children[task]; !ok { return } dp := &progress.DisplayProps{ Prefix: s.prefix, Suffix: strings.Split(p, "\n")[0], } switch s.Logger.GetDisplayMode() { case logprinter.DisplayModeJSON: _ = printDpJSON(dp) case logprinter.DisplayModePlain: printDpPlain(s.Logger, dp) default: s.progressBar.UpdateDisplay(dp) } } // ParallelStepDisplay is a task that will display multiple progress bars in parallel for inner tasks. // Inner tasks will be executed in parallel. 
type ParallelStepDisplay struct { inner *Parallel prefix string Logger *logprinter.Logger progressBar *progress.MultiBar } func newParallelStepDisplay(prefix string, ignoreError bool, sdTasks ...*StepDisplay) *ParallelStepDisplay { bar := progress.NewMultiBar(prefix) tasks := make([]Task, 0, len(sdTasks)) for _, t := range sdTasks { if !t.hidden { t.resetAsMultiBarItem(bar) } tasks = append(tasks, t) } return &ParallelStepDisplay{ inner: &Parallel{inner: tasks, ignoreError: ignoreError}, prefix: prefix, progressBar: bar, } } // SetLogger set the logger of step func (ps *ParallelStepDisplay) SetLogger(logger *logprinter.Logger) *ParallelStepDisplay { ps.Logger = logger return ps } // Execute implements the Task interface func (ps *ParallelStepDisplay) Execute(ctx context.Context) error { switch ps.Logger.GetDisplayMode() { case logprinter.DisplayModeJSON, logprinter.DisplayModePlain: // do nothing default: ps.progressBar.StartRenderLoop() defer ps.progressBar.StopRenderLoop() } err := ps.inner.Execute(ctx) return err } // Rollback implements the Task interface func (ps *ParallelStepDisplay) Rollback(ctx context.Context) error { return ps.inner.Rollback(ctx) } // String implements the fmt.Stringer interface func (ps *ParallelStepDisplay) String() string { return ps.inner.String() } func printDpJSON(dp *progress.DisplayProps) error { output, err := json.Marshal(dp) if err != nil { return err } fmt.Println(string(output)) return nil } func printDpPlain(logger *logprinter.Logger, dp *progress.DisplayProps) { switch dp.Mode { case progress.ModeError: logger.Errorf("progress: %s", dp) default: logger.Infof("progress: %s", dp) } } tiup-1.16.3/pkg/cluster/task/sysctl.go000066400000000000000000000033101505422223000176270ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "strings" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" ) var ( sysctlFilePath = "/etc/sysctl.d/99-sysctl.conf" ) // Sysctl set a kernel param on host type Sysctl struct { host string key string val string sudo bool } // Execute implements the Task interface func (s *Sysctl) Execute(ctx context.Context) error { e, ok := ctxt.GetInner(ctx).GetExecutor(s.host) if !ok { return ErrNoExecutor } cmd := strings.Join([]string{ fmt.Sprintf("cp %s{,.bak} 2>/dev/null", sysctlFilePath), fmt.Sprintf("sed -i '/%s/d' %s 2>/dev/null", s.key, sysctlFilePath), fmt.Sprintf("echo '%s=%s' >> %s", s.key, s.val, sysctlFilePath), fmt.Sprintf("sysctl -p %s", sysctlFilePath), }, " && ") stdout, stderr, err := e.Execute(ctx, cmd, s.sudo) ctxt.GetInner(ctx).SetOutputs(s.host, stdout, stderr) if err != nil { return errors.Trace(err) } return nil } // Rollback implements the Task interface func (s *Sysctl) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (s *Sysctl) String() string { return fmt.Sprintf("Sysctl: host=%s %s = %s", s.host, s.key, s.val) } tiup-1.16.3/pkg/cluster/task/systemd.go000066400000000000000000000034131505422223000200020ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" "fmt" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/module" ) // SystemCtl run systemctl command on host type SystemCtl struct { host string unit string action string daemonReload bool checkactive bool scope string } // Execute implements the Task interface func (c *SystemCtl) Execute(ctx context.Context) error { e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { return ErrNoExecutor } cfg := module.SystemdModuleConfig{ Unit: c.unit, Action: c.action, ReloadDaemon: c.daemonReload, CheckActive: c.checkactive, Scope: c.scope, } systemd := module.NewSystemdModule(cfg) stdout, stderr, err := systemd.Execute(ctx, e) ctxt.GetInner(ctx).SetOutputs(c.host, stdout, stderr) if err != nil { return errors.Annotatef(err, "stdout: %s, stderr:%s", string(stdout), string(stderr)) } return nil } // Rollback implements the Task interface func (c *SystemCtl) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *SystemCtl) String() string { return fmt.Sprintf("SystemCtl: host=%s action=%s %s", c.host, c.action, c.unit) } tiup-1.16.3/pkg/cluster/task/task.go000066400000000000000000000113411505422223000172530ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package task import ( "context" stderrors "errors" "fmt" "strings" "sync" "github.com/pingcap/tiup/pkg/checkpoint" "github.com/pingcap/tiup/pkg/cluster/ctxt" logprinter "github.com/pingcap/tiup/pkg/logger/printer" ) var ( // ErrUnsupportedRollback means the task do not support rollback. ErrUnsupportedRollback = stderrors.New("unsupported rollback") // ErrNoExecutor means not being able to get the executor. ErrNoExecutor = stderrors.New("no executor") // ErrNoOutput means not being able to get the output of host. ErrNoOutput = stderrors.New("no outputs available") ) type ( // Task represents a operation while TiUP execution Task interface { fmt.Stringer Execute(ctx context.Context) error Rollback(ctx context.Context) error } // Serial will execute a bundle of task in serialized way Serial struct { ignoreError bool hideDetailDisplay bool inner []Task } // Parallel will execute a bundle of task in parallelism way Parallel struct { ignoreError bool hideDetailDisplay bool inner []Task } ) func isDisplayTask(t Task) bool { if _, ok := t.(*Serial); ok { return true } if _, ok := t.(*Parallel); ok { return true } if _, ok := t.(*StepDisplay); ok { return true } if _, ok := t.(*ParallelStepDisplay); ok { return true } return false } // Execute implements the Task interface func (s *Serial) Execute(ctx context.Context) error { for _, t := range s.inner { if !isDisplayTask(t) { if !s.hideDetailDisplay { ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger). 
Infof("+ [ Serial ] - %s", t.String()) } } ctxt.GetInner(ctx).Ev.PublishTaskBegin(t) err := t.Execute(ctx) ctxt.GetInner(ctx).Ev.PublishTaskFinish(t, err) if err != nil && !s.ignoreError { return err } } return nil } // Rollback implements the Task interface func (s *Serial) Rollback(ctx context.Context) error { // Rollback in reverse order for i := len(s.inner) - 1; i >= 0; i-- { err := s.inner[i].Rollback(ctx) if err != nil { return err } } return nil } // String implements the fmt.Stringer interface func (s *Serial) String() string { var ss []string for _, t := range s.inner { ss = append(ss, t.String()) } return strings.Join(ss, "\n") } // Execute implements the Task interface func (pt *Parallel) Execute(ctx context.Context) error { var firstError error var mu sync.Mutex wg := sync.WaitGroup{} maxWorkers := ctxt.GetInner(ctx).Concurrency workerPool := make(chan struct{}, maxWorkers) for _, t := range pt.inner { wg.Add(1) workerPool <- struct{}{} // the checkpoint part of context can't be shared between goroutines // since it's used to trace the stack, so we must create a new layer // of checkpoint context every time put it into a new goroutine. go func(ctx context.Context, t Task) { defer func() { <-workerPool wg.Done() }() if !isDisplayTask(t) { if !pt.hideDetailDisplay { ctx.Value(logprinter.ContextKeyLogger).(*logprinter.Logger). 
Infof("+ [Parallel] - %s", t.String()) } } ctxt.GetInner(ctx).Ev.PublishTaskBegin(t) err := t.Execute(ctx) ctxt.GetInner(ctx).Ev.PublishTaskFinish(t, err) if err != nil { mu.Lock() if firstError == nil { firstError = err } mu.Unlock() } }(checkpoint.NewContext(ctx), t) } wg.Wait() if pt.ignoreError { return nil } return firstError } // Rollback implements the Task interface func (pt *Parallel) Rollback(ctx context.Context) error { var firstError error var mu sync.Mutex wg := sync.WaitGroup{} for _, t := range pt.inner { wg.Add(1) // the checkpoint part of context can't be shared between goroutines // since it's used to trace the stack, so we must create a new layer // of checkpoint context every time put it into a new goroutine. go func(ctx context.Context, t Task) { defer wg.Done() err := t.Rollback(ctx) if err != nil { mu.Lock() if firstError == nil { firstError = err } mu.Unlock() } }(checkpoint.NewContext(ctx), t) } wg.Wait() return firstError } // String implements the fmt.Stringer interface func (pt *Parallel) String() string { var ss []string for _, t := range pt.inner { ss = append(ss, t.String()) } return strings.Join(ss, "\n") } tiup-1.16.3/pkg/cluster/task/tls.go000066400000000000000000000074041505422223000171200ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "encoding/pem" "fmt" "net" "path/filepath" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/ctxt" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/crypto" "github.com/pingcap/tiup/pkg/meta" "github.com/pingcap/tiup/pkg/utils" ) // TLSCert generates a certificate for instance type TLSCert struct { comp string role string host string port int ca *crypto.CertificateAuthority paths meta.DirPaths } // Execute implements the Task interface func (c *TLSCert) Execute(ctx context.Context) error { privKey, err := crypto.NewKeyPair(crypto.KeyTypeRSA, crypto.KeySchemeRSASSAPSSSHA256) if err != nil { return err } // Add localhost and 127.0.0.1 to the trust list, // then it is easy for some scripts to request a local interface directly hosts := []string{"localhost"} ips := []string{"127.0.0.1"} if host := c.host; net.ParseIP(host) != nil && host != "127.0.0.1" { ips = append(ips, host) } else if host != "localhost" { hosts = append(hosts, host) } csr, err := privKey.CSR(c.role, c.comp, hosts, ips) if err != nil { return err } cert, err := c.ca.Sign(csr) if err != nil { return err } // make sure the cache dir exist if err := utils.MkdirAll(c.paths.Cache, 0755); err != nil { return err } // save cert to cache dir keyFileName := fmt.Sprintf("%s-%s-%d.pem", c.role, c.host, c.port) certFileName := fmt.Sprintf("%s-%s-%d.crt", c.role, c.host, c.port) keyFile := filepath.Join( c.paths.Cache, keyFileName, ) certFile := filepath.Join( c.paths.Cache, certFileName, ) caFile := filepath.Join(c.paths.Cache, spec.TLSCACert) if err := utils.SaveFileWithBackup(keyFile, privKey.Pem(), ""); err != nil { return err } if err := utils.SaveFileWithBackup(certFile, pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: cert, }), ""); err != nil { return err } if err := utils.SaveFileWithBackup(caFile, pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", Bytes: c.ca.Cert.Raw, }), ""); err != nil { return err } // transfer file to 
remote e, ok := ctxt.GetInner(ctx).GetExecutor(c.host) if !ok { return ErrNoExecutor } if err := e.Transfer(ctx, caFile, filepath.Join(c.paths.Deploy, spec.TLSCertKeyDir, spec.TLSCACert), false, /* download */ 0, /* limit */ false /* compress */); err != nil { return errors.Annotate(err, "failed to transfer CA cert to server") } if err := e.Transfer(ctx, keyFile, filepath.Join(c.paths.Deploy, spec.TLSCertKeyDir, fmt.Sprintf("%s.pem", c.role)), false, /* download */ 0, /* limit */ false /* compress */); err != nil { return errors.Annotate(err, "failed to transfer TLS private key to server") } if err := e.Transfer(ctx, certFile, filepath.Join(c.paths.Deploy, spec.TLSCertKeyDir, fmt.Sprintf("%s.crt", c.role)), false, /* download */ 0, /* limit */ false /* compress */); err != nil { return errors.Annotate(err, "failed to transfer TLS cert to server") } return nil } // Rollback implements the Task interface func (c *TLSCert) Rollback(ctx context.Context) error { return ErrUnsupportedRollback } // String implements the fmt.Stringer interface func (c *TLSCert) String() string { return fmt.Sprintf("TLSCert: host=%s role=%s cn=%s", c.host, c.role, c.comp) } tiup-1.16.3/pkg/cluster/task/update_meta.go000066400000000000000000000152141505422223000206040ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package task import ( "context" "fmt" "strings" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/set" ) // UpdateMeta is used to maintain the cluster meta information type UpdateMeta struct { cluster string metadata *spec.ClusterMeta deletedNodeIDs []string } // Execute implements the Task interface // the metadata especially the topology is in wide use, // the other callers point to this field by a pointer, // so we should update the original topology directly, and don't make a copy func (u *UpdateMeta) Execute(ctx context.Context) error { deleted := set.NewStringSet(u.deletedNodeIDs...) topo := u.metadata.Topology newMeta := &spec.ClusterMeta{} *newMeta = *u.metadata newMeta.Topology = &spec.Specification{ GlobalOptions: u.metadata.Topology.GlobalOptions, MonitoredOptions: u.metadata.Topology.MonitoredOptions, ServerConfigs: u.metadata.Topology.ServerConfigs, } tidbServers := make([]*spec.TiDBSpec, 0) for i, instance := range (&spec.TiDBComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tidbServers = append(tidbServers, topo.TiDBServers[i]) } newMeta.Topology.TiDBServers = tidbServers tikvServers := make([]*spec.TiKVSpec, 0) for i, instance := range (&spec.TiKVComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tikvServers = append(tikvServers, topo.TiKVServers[i]) } newMeta.Topology.TiKVServers = tikvServers pdServers := make([]*spec.PDSpec, 0) for i, instance := range (&spec.PDComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } pdServers = append(pdServers, topo.PDServers[i]) } newMeta.Topology.PDServers = pdServers tsoServers := make([]*spec.TSOSpec, 0) for i, instance := range (&spec.TSOComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tsoServers = append(tsoServers, topo.TSOServers[i]) } newMeta.Topology.TSOServers = tsoServers schedulingServers := make([]*spec.SchedulingSpec, 0) for i, instance := 
range (&spec.SchedulingComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } schedulingServers = append(schedulingServers, topo.SchedulingServers[i]) } newMeta.Topology.SchedulingServers = schedulingServers tiproxyServers := make([]*spec.TiProxySpec, 0) for i, instance := range (&spec.TiProxyComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tiproxyServers = append(tiproxyServers, topo.TiProxyServers[i]) } newMeta.Topology.TiProxyServers = tiproxyServers dashboardServers := make([]*spec.DashboardSpec, 0) for i, instance := range (&spec.DashboardComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } dashboardServers = append(dashboardServers, topo.DashboardServers[i]) } newMeta.Topology.DashboardServers = dashboardServers tiflashServers := make([]*spec.TiFlashSpec, 0) for i, instance := range (&spec.TiFlashComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tiflashServers = append(tiflashServers, topo.TiFlashServers[i]) } newMeta.Topology.TiFlashServers = tiflashServers pumpServers := make([]*spec.PumpSpec, 0) for i, instance := range (&spec.PumpComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } pumpServers = append(pumpServers, topo.PumpServers[i]) } newMeta.Topology.PumpServers = pumpServers drainerServers := make([]*spec.DrainerSpec, 0) for i, instance := range (&spec.DrainerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } drainerServers = append(drainerServers, topo.Drainers[i]) } newMeta.Topology.Drainers = drainerServers cdcServers := make([]*spec.CDCSpec, 0) for i, instance := range (&spec.CDCComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } cdcServers = append(cdcServers, topo.CDCServers[i]) } newMeta.Topology.CDCServers = cdcServers tikvCDCServers := make([]*spec.TiKVCDCSpec, 0) for i, instance := range 
(&spec.TiKVCDCComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tikvCDCServers = append(tikvCDCServers, topo.TiKVCDCServers[i]) } newMeta.Topology.TiKVCDCServers = tikvCDCServers tisparkWorkers := make([]*spec.TiSparkWorkerSpec, 0) for i, instance := range (&spec.TiSparkWorkerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tisparkWorkers = append(tisparkWorkers, topo.TiSparkWorkers[i]) } newMeta.Topology.TiSparkWorkers = tisparkWorkers tisparkMasters := make([]*spec.TiSparkMasterSpec, 0) for i, instance := range (&spec.TiSparkMasterComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } tisparkMasters = append(tisparkMasters, topo.TiSparkMasters[i]) } newMeta.Topology.TiSparkMasters = tisparkMasters monitors := make([]*spec.PrometheusSpec, 0) for i, instance := range (&spec.MonitorComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } monitors = append(monitors, topo.Monitors[i]) } newMeta.Topology.Monitors = monitors grafanas := make([]*spec.GrafanaSpec, 0) for i, instance := range (&spec.GrafanaComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } grafanas = append(grafanas, topo.Grafanas[i]) } newMeta.Topology.Grafanas = grafanas alertmanagers := make([]*spec.AlertmanagerSpec, 0) for i, instance := range (&spec.AlertManagerComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { continue } alertmanagers = append(alertmanagers, topo.Alertmanagers[i]) } newMeta.Topology.Alertmanagers = alertmanagers return spec.SaveClusterMeta(u.cluster, newMeta) } // Rollback implements the Task interface func (u *UpdateMeta) Rollback(ctx context.Context) error { return spec.SaveClusterMeta(u.cluster, u.metadata) } // String implements the fmt.Stringer interface func (u *UpdateMeta) String() string { return fmt.Sprintf("UpdateMeta: cluster=%s, deleted=`'%s'`", u.cluster, 
strings.Join(u.deletedNodeIDs, "','")) } tiup-1.16.3/pkg/cluster/task/update_topology.go000066400000000000000000000111211505422223000215230ustar00rootroot00000000000000package task import ( "context" "encoding/json" "fmt" "path/filepath" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/cluster/spec" "github.com/pingcap/tiup/pkg/proxy" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" clientv3 "go.etcd.io/etcd/client/v3" ) // UpdateTopology is used to maintain the cluster meta information type UpdateTopology struct { cluster string profileDir string metadata *spec.ClusterMeta deletedNodeIDs []string tcpProxy *proxy.TCPProxy } // String implements the fmt.Stringer interface func (u *UpdateTopology) String() string { return fmt.Sprintf("UpdateTopology: cluster=%s", u.cluster) } // Execute implements the Task interface func (u *UpdateTopology) Execute(ctx context.Context) error { tlsCfg, err := u.metadata.Topology.TLSConfig( filepath.Join(u.profileDir, spec.TLSCertKeyDir), ) if err != nil { return err } var client *clientv3.Client if u.tcpProxy == nil { client, err = u.metadata.Topology.GetEtcdClient(tlsCfg) } else { var closeC chan struct{} client, closeC, err = u.metadata.Topology.GetEtcdProxyClient(tlsCfg, u.tcpProxy) defer u.tcpProxy.Close(closeC) } if err != nil { return err } // fix https://github.com/pingcap/tiup/issues/333 // etcd client defaults to wait forever // if all pd were down, don't hang forever ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() txn := client.Txn(ctx) topo := u.metadata.Topology deleted := set.NewStringSet(u.deletedNodeIDs...) 
var ops []clientv3.Op var instances []spec.Instance ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.MonitorComponent{Topology: topo}).Instances(), "prometheus") ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.GrafanaComponent{Topology: topo}).Instances(), "grafana") ops, instances = updateInstancesAndOps(ops, instances, deleted, (&spec.AlertManagerComponent{Topology: topo}).Instances(), "alertmanager") for _, instance := range (&spec.TiDBComponent{Topology: topo}).Instances() { if deleted.Exist(instance.ID()) { ops = append(ops, clientv3.OpDelete("/topology/tidb/"+utils.JoinHostPort(instance.GetHost(), instance.GetPort()), clientv3.WithPrefix())) } } // the prometheus,grafana,alertmanager stored in etcd will be used by other components (tidb, pd, etc.) // and they assume there is ONLY ONE prometheus. // ref https://github.com/pingcap/tiup/issues/954#issuecomment-737002185 updated := set.NewStringSet() for _, ins := range instances { if updated.Exist(ins.ComponentName()) { continue } op, err := updateTopologyOp(ins) if err != nil { return err } updated.Insert(ins.ComponentName()) ops = append(ops, *op) } _, err = txn.Then(ops...).Commit() return err } // componentTopology represent the topology info for alertmanager, prometheus and grafana. type componentTopology struct { IP string `json:"ip"` Port int `json:"port"` DeployPath string `json:"deploy_path"` } // updateInstancesAndOps receives alertmanager, prometheus and grafana instance list, if the list has // // no member or all deleted, it will add a `OpDelete` in ops, otherwise it will push all current not deleted instances into instance list. 
func updateInstancesAndOps(ops []clientv3.Op, ins []spec.Instance, deleted set.StringSet, instances []spec.Instance, componentName string) ([]clientv3.Op, []spec.Instance) { var currentInstances []spec.Instance for _, instance := range instances { if deleted.Exist(instance.ID()) { continue } currentInstances = append(currentInstances, instance) } if len(currentInstances) == 0 { ops = append(ops, clientv3.OpDelete("/topology/"+componentName)) } else { ins = append(ins, currentInstances...) } return ops, ins } // updateTopologyOp receive an alertmanager, prometheus or grafana instance, and return an operation // // for update it's topology. func updateTopologyOp(instance spec.Instance) (*clientv3.Op, error) { switch compName := instance.ComponentName(); compName { case spec.ComponentAlertmanager, spec.ComponentPrometheus, spec.ComponentGrafana: topology := componentTopology{ IP: instance.GetHost(), Port: instance.GetPort(), DeployPath: instance.DeployDir(), } data, err := json.Marshal(topology) if err != nil { return nil, err } op := clientv3.OpPut("/topology/"+compName, string(data)) return &op, nil default: return nil, errors.New("Wrong arguments: updateTopologyOp receives wrong arguments") } } // Rollback implements the Task interface func (u *UpdateTopology) Rollback(ctx context.Context) error { return nil } tiup-1.16.3/pkg/cluster/template/000077500000000000000000000000001505422223000166335ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/template/config/000077500000000000000000000000001505422223000201005ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/template/config/alertmanager.go000066400000000000000000000031411505422223000230700ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package config import ( "path" "github.com/pingcap/errors" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // AlertManagerConfig represent the data to generate AlertManager config type AlertManagerConfig struct{} // NewAlertManagerConfig returns a AlertManagerConfig func NewAlertManagerConfig() *AlertManagerConfig { return &AlertManagerConfig{} } // Config generate the config file data. func (c *AlertManagerConfig) Config() ([]byte, error) { fp := path.Join("templates", "config", "alertmanager.yml") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigWithTemplate generate the AlertManager config content by tpl func (c *AlertManagerConfig) ConfigWithTemplate(tpl string) ([]byte, error) { return []byte(tpl), nil } // ConfigToFile write config content to specific path func (c *AlertManagerConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } if err := utils.WriteFile(file, config, 0755); err != nil { return errors.AddStack(err) } return nil } tiup-1.16.3/pkg/cluster/template/config/blackbox.go000066400000000000000000000034771505422223000222270ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package config import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // BlackboxConfig represent the data to generate AlertManager config type BlackboxConfig struct { DeployDir string TLSEnabled bool } // NewBlackboxConfig returns a BlackboxConfig func NewBlackboxConfig(deployDir string, tlsEnabled bool) *BlackboxConfig { return &BlackboxConfig{ DeployDir: deployDir, TLSEnabled: tlsEnabled, } } // Config generate the config file data. func (c *BlackboxConfig) Config() ([]byte, error) { fp := path.Join("templates", "config", "blackbox.yml.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigToFile write config content to specific path func (c *BlackboxConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } // ConfigWithTemplate generate the AlertManager config content by tpl func (c *BlackboxConfig) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("Blackbox").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/config/config.go000066400000000000000000000014271505422223000217000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package config import ( "path/filepath" "github.com/pingcap/tiup/embed" ) // GetConfig returns a raw config file from embed templates func GetConfig(filename string) ([]byte, error) { fp := filepath.Join("templates", "config", filename) return embed.ReadTemplate(fp) } tiup-1.16.3/pkg/cluster/template/config/dashboard.go000066400000000000000000000035001505422223000223540ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package config import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // DashboardConfig represent the data to generate Dashboard config type DashboardConfig struct { ClusterName string DeployDir string } // NewDashboardConfig returns a DashboardConfig func NewDashboardConfig(cluster, deployDir string) *DashboardConfig { return &DashboardConfig{ ClusterName: cluster, DeployDir: deployDir, } } // Config generate the config file data. 
func (c *DashboardConfig) Config() ([]byte, error) { fp := path.Join("templates", "config", "dashboard.yml.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigWithTemplate generate the Dashboard config content by tpl func (c *DashboardConfig) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("dashboard").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } // ConfigToFile write config content to specific path func (c *DashboardConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } tiup-1.16.3/pkg/cluster/template/config/datasource.go000066400000000000000000000042641505422223000225670ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // DatasourceConfig represent the data to generate Datasource config type DatasourceConfig struct { Name string Type string URL string IsDefault bool } // NewDatasourceConfig returns a DatasourceConfig func NewDatasourceConfig(clusterName, url string) *DatasourceConfig { return &DatasourceConfig{ Name: clusterName, Type: "prometheus", URL: url, IsDefault: true, } } // WithName sets name of datasource func (c *DatasourceConfig) WithName(name string) *DatasourceConfig { c.Name = name return c } // WithType sets type of datasource func (c *DatasourceConfig) WithType(typ string) *DatasourceConfig { c.Type = typ return c } // WithIsDefault sets if datasource is default func (c *DatasourceConfig) WithIsDefault(isDefault bool) *DatasourceConfig { c.IsDefault = isDefault return c } // ConfigToFile write config content to specific path func (c *DatasourceConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } // Config generate the config file data. func (c *DatasourceConfig) Config() ([]byte, error) { fp := path.Join("templates", "config", "datasource.yml.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } tmpl, err := template.New("Datasource").Parse(string(tpl)) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, map[string]any{ "Datasources": []any{c}, }); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/config/grafana.go000066400000000000000000000067261505422223000220410ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package config import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // GrafanaConfig represent the data to generate Grafana config type GrafanaConfig struct { DeployDir string IP string Port uint64 Username string // admin_user Password string // admin_password AnonymousEnable bool // anonymous enable RootURL string // root_url Domain string // domain DefaultTheme string // default_theme OrgName string // org_name OrgRole string // org_role } // NewGrafanaConfig returns a GrafanaConfig func NewGrafanaConfig(ip, deployDir string) *GrafanaConfig { return &GrafanaConfig{ DeployDir: deployDir, IP: ip, Port: 3000, } } // WithPort set Port field of GrafanaConfig func (c *GrafanaConfig) WithPort(port uint64) *GrafanaConfig { c.Port = port return c } // WithUsername sets username of admin user func (c *GrafanaConfig) WithUsername(user string) *GrafanaConfig { c.Username = user return c } // WithPassword sets password of admin user func (c *GrafanaConfig) WithPassword(passwd string) *GrafanaConfig { c.Password = passwd return c } // WithAnonymousenable sets anonymousEnable of anonymousEnable func (c *GrafanaConfig) WithAnonymousenable(anonymousEnable bool) *GrafanaConfig { c.AnonymousEnable = anonymousEnable return c } // WithRootURL sets rootURL of root url func (c *GrafanaConfig) WithRootURL(rootURL string) *GrafanaConfig { c.RootURL = rootURL return c } // WithDomain sets domain of server domain func (c *GrafanaConfig) WithDomain(domain string) *GrafanaConfig { c.Domain = domain return c } // WithDefaultTheme sets defaultTheme of default theme func (c *GrafanaConfig) 
WithDefaultTheme(defaultTheme string) *GrafanaConfig { c.DefaultTheme = defaultTheme return c } // WithOrgName sets orgName of org name func (c *GrafanaConfig) WithOrgName(orgName string) *GrafanaConfig { c.OrgName = orgName return c } // WithOrgRole sets orgName of org role func (c *GrafanaConfig) WithOrgRole(orgRole string) *GrafanaConfig { c.OrgRole = orgRole return c } // Config generate the config file data. func (c *GrafanaConfig) Config() ([]byte, error) { fp := path.Join("templates", "config", "grafana.ini.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigWithTemplate generate the Grafana config content by tpl func (c *GrafanaConfig) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("Grafana").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } // ConfigToFile write config content to specific path func (c *GrafanaConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } tiup-1.16.3/pkg/cluster/template/config/ngmonitoring.go000066400000000000000000000026431505422223000231460ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // NgMonitoringConfig represent the data to generate NgMonitoring config type NgMonitoringConfig struct { ClusterName string Address string AdvertiseAddress string TLSEnabled bool PDAddrs string DeployDir string DataDir string LogDir string } // ConfigToFile write config content to specific path func (c *NgMonitoringConfig) ConfigToFile(file string) error { fp := path.Join("templates", "config", "ngmonitoring.toml.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("NgMonitoring").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/config/prometheus.go000066400000000000000000000224441505422223000226300ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "bytes" "path" "strings" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/tidbver" "github.com/pingcap/tiup/pkg/utils" ) // PrometheusConfig represent the data to generate Prometheus config type PrometheusConfig struct { ClusterName string ScrapeInterval string ScrapeTimeout string TLSEnabled bool NodeExporterAddrs []string TiDBStatusAddrs []string TiProxyStatusAddrs []string TiKVStatusAddrs []string PDAddrs []string TSOAddrs []string SchedulingAddrs []string TiFlashStatusAddrs []string TiFlashLearnerStatusAddrs []string PumpAddrs []string DrainerAddrs []string CDCAddrs []string TiKVCDCAddrs []string BlackboxExporterAddrs []string LightningAddrs []string MonitoredServers []string AlertmanagerAddrs []string NGMonitoringAddrs []string PushgatewayAddrs []string BlackboxAddr string GrafanaAddr string HasTiKVAccelerateRules bool DMMasterAddrs []string DMWorkerAddrs []string LocalRules []string RemoteConfig string } // NewPrometheusConfig returns a PrometheusConfig func NewPrometheusConfig(clusterName, clusterVersion string, enableTLS bool) *PrometheusConfig { cfg := &PrometheusConfig{ ClusterName: clusterName, TLSEnabled: enableTLS, HasTiKVAccelerateRules: tidbver.PrometheusHasTiKVAccelerateRules(clusterVersion), } return cfg } // AddNodeExpoertor add a node expoter address func (c *PrometheusConfig) AddNodeExpoertor(ip string, port uint64) *PrometheusConfig { c.NodeExporterAddrs = append(c.NodeExporterAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddTiDB add a TiDB address func (c *PrometheusConfig) AddTiDB(ip string, port uint64) *PrometheusConfig { c.TiDBStatusAddrs = append(c.TiDBStatusAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddTiProxy add a TiProxy address func (c *PrometheusConfig) AddTiProxy(ip string, port uint64) *PrometheusConfig { c.TiProxyStatusAddrs = append(c.TiProxyStatusAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddTiKV add a TiKV address func (c *PrometheusConfig) 
AddTiKV(ip string, port uint64) *PrometheusConfig { c.TiKVStatusAddrs = append(c.TiKVStatusAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddPD add a PD address func (c *PrometheusConfig) AddPD(ip string, port uint64) *PrometheusConfig { c.PDAddrs = append(c.PDAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddTSO add a TSO address func (c *PrometheusConfig) AddTSO(ip string, port uint64) *PrometheusConfig { c.TSOAddrs = append(c.TSOAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddScheduling add a scheduling address func (c *PrometheusConfig) AddScheduling(ip string, port uint64) *PrometheusConfig { c.SchedulingAddrs = append(c.SchedulingAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddTiFlashLearner add a TiFlash learner address func (c *PrometheusConfig) AddTiFlashLearner(ip string, port uint64) *PrometheusConfig { c.TiFlashLearnerStatusAddrs = append(c.TiFlashLearnerStatusAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddTiFlash add a TiFlash address func (c *PrometheusConfig) AddTiFlash(ip string, port uint64) *PrometheusConfig { c.TiFlashStatusAddrs = append(c.TiFlashStatusAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddPump add a pump address func (c *PrometheusConfig) AddPump(ip string, port uint64) *PrometheusConfig { c.PumpAddrs = append(c.PumpAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddDrainer add a drainer address func (c *PrometheusConfig) AddDrainer(ip string, port uint64) *PrometheusConfig { c.DrainerAddrs = append(c.DrainerAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddCDC add a cdc address func (c *PrometheusConfig) AddCDC(ip string, port uint64) *PrometheusConfig { c.CDCAddrs = append(c.CDCAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddTiKVCDC add a tikv-cdc address func (c *PrometheusConfig) AddTiKVCDC(ip string, port uint64) *PrometheusConfig { c.TiKVCDCAddrs = append(c.TiKVCDCAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddBlackboxExporter 
add a BlackboxExporter address func (c *PrometheusConfig) AddBlackboxExporter(ip string, port uint64) *PrometheusConfig { c.BlackboxExporterAddrs = append(c.BlackboxExporterAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddLightning add a lightning address func (c *PrometheusConfig) AddLightning(ip string, port uint64) *PrometheusConfig { c.LightningAddrs = append(c.LightningAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddMonitoredServer add a MonitoredServer address func (c *PrometheusConfig) AddMonitoredServer(ip string) *PrometheusConfig { c.MonitoredServers = append(c.MonitoredServers, ip) return c } // AddAlertmanager add an alertmanager address func (c *PrometheusConfig) AddAlertmanager(ip string, port uint64) *PrometheusConfig { c.AlertmanagerAddrs = append(c.AlertmanagerAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddPushgateway add an pushgateway address func (c *PrometheusConfig) AddPushgateway(addresses []string) *PrometheusConfig { c.PushgatewayAddrs = addresses return c } // AddBlackbox add an blackbox address func (c *PrometheusConfig) AddBlackbox(ip string, port uint64) *PrometheusConfig { c.BlackboxAddr = utils.JoinHostPort(ip, int(port)) return c } // AddNGMonitoring add an ng-monitoring server exporter address func (c *PrometheusConfig) AddNGMonitoring(ip string, port uint64) *PrometheusConfig { c.NGMonitoringAddrs = append(c.NGMonitoringAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddGrafana add an Grafana address func (c *PrometheusConfig) AddGrafana(ip string, port uint64) *PrometheusConfig { c.GrafanaAddr = utils.JoinHostPort(ip, int(port)) return c } // AddDMMaster add an dm-master address func (c *PrometheusConfig) AddDMMaster(ip string, port uint64) *PrometheusConfig { c.DMMasterAddrs = append(c.DMMasterAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddDMWorker add an dm-worker address func (c *PrometheusConfig) AddDMWorker(ip string, port uint64) *PrometheusConfig { c.DMWorkerAddrs = 
append(c.DMWorkerAddrs, utils.JoinHostPort(ip, int(port))) return c } // AddLocalRule add a local rule func (c *PrometheusConfig) AddLocalRule(rule string) *PrometheusConfig { c.LocalRules = append(c.LocalRules, rule) return c } // SetRemoteConfig set remote read/write config func (c *PrometheusConfig) SetRemoteConfig(cfg string) *PrometheusConfig { c.RemoteConfig = cfg return c } // Config generate the config file data. func (c *PrometheusConfig) Config() ([]byte, error) { fp := path.Join("templates", "config", "prometheus.yml.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigWithAgentMode generate the config file data with agent mode enabled/disabled. // In agent mode, we need to exclude rule_files section which is not supported. func (c *PrometheusConfig) ConfigWithAgentMode(enableAgent bool) ([]byte, error) { fp := path.Join("templates", "config", "prometheus.yml.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } // If agent mode is enabled, remove rule_files section from the template if enableAgent { // Remove the rule_files section which isn't allowed in agent mode // This is a simple string manipulation to remove the section - a more robust approach // would be to create a dedicated template for agent mode tplContent := string(tpl) ruleSectionStart := "rule_files:" startIndex := strings.Index(tplContent, ruleSectionStart) if startIndex >= 0 { // Find the end of the rule_files section (next section with same indentation) scrapeConfigsSection := "scrape_configs:" endIndex := strings.Index(tplContent[startIndex:], scrapeConfigsSection) if endIndex >= 0 { // Build a new template without the rule_files section newTemplate := tplContent[:startIndex] + tplContent[startIndex+endIndex:] return c.ConfigWithTemplate(newTemplate) } } } return c.ConfigWithTemplate(string(tpl)) } // ConfigWithTemplate generate the Prometheus config content by tpl func (c *PrometheusConfig) 
ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("Prometheus").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } // ConfigToFile write config content to specific path func (c *PrometheusConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } tiup-1.16.3/pkg/cluster/template/config/prometheus_test.go000066400000000000000000000045561505422223000236730ustar00rootroot00000000000000// Copyright 2025 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package config import ( "strings" "testing" ) func TestPrometheusConfigWithAgentMode(t *testing.T) { cfg := NewPrometheusConfig("test-cluster", "v6.1.0", false) cfg.AddPD("127.0.0.1", 2379) cfg.AddTiDB("127.0.0.1", 10080) cfg.AddTiKV("127.0.0.1", 20180) // Test normal mode config normalConfig, err := cfg.Config() if err != nil { t.Fatalf("Failed to generate normal config: %v", err) } // Verify that normal config contains rule_files if !strings.Contains(string(normalConfig), "rule_files:") { t.Error("Normal config should contain rule_files section") } // Test agent mode config agentConfig, err := cfg.ConfigWithAgentMode(true) if err != nil { t.Fatalf("Failed to generate agent config: %v", err) } // Verify that agent config doesn't contain rule_files if strings.Contains(string(agentConfig), "rule_files:") { t.Error("Agent mode config should not contain rule_files section") } // Verify that agent config contains scrape_configs if !strings.Contains(string(agentConfig), "scrape_configs:") { t.Error("Agent mode config should contain scrape_configs section") } } func TestConfigToFileWithAgentMode(t *testing.T) { // This is just a basic test to ensure the function doesn't panic // For real file operations, we'd need to use a test directory cfg := NewPrometheusConfig("test-cluster", "v6.1.0", false) // Generate a config string directly instead of writing to file agentConfig, err := cfg.ConfigWithAgentMode(true) if err != nil { t.Fatalf("Failed to generate agent config: %v", err) } // Verify basic structure of the output if !strings.Contains(string(agentConfig), "cluster: 'test-cluster'") { t.Error("Agent config should contain cluster name") } // Verify that rule_files section is removed if strings.Contains(string(agentConfig), "rule_files:") { t.Error("Agent mode config should not contain rule_files section") } } tiup-1.16.3/pkg/cluster/template/config/tispark.go000066400000000000000000000041111505422223000221010ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package config import ( "bytes" "path/filepath" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TiSparkConfig represent the data to generate TiSpark configs type TiSparkConfig struct { TiSparkMasters string CustomFields map[string]any Endpoints []string } // NewTiSparkConfig returns a TiSparkConfig func NewTiSparkConfig(pds []string) *TiSparkConfig { return &TiSparkConfig{Endpoints: pds} } // WithMasters sets master address func (c *TiSparkConfig) WithMasters(masters string) *TiSparkConfig { c.TiSparkMasters = masters return c } // WithCustomFields sets custom setting fields func (c *TiSparkConfig) WithCustomFields(m map[string]any) *TiSparkConfig { c.CustomFields = m return c } // Config generate the config file data. 
func (c *TiSparkConfig) Config() ([]byte, error) { fp := filepath.Join("templates", "config", "spark-defaults.conf.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigToFile write config content to specific path func (c *TiSparkConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } // ConfigWithTemplate parses the template file func (c *TiSparkConfig) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("TiSpark").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/install/000077500000000000000000000000001505422223000203015ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/template/install/local_install.sh.go000066400000000000000000000063441505422223000240700ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package install import "github.com/pingcap/tiup/pkg/utils" // WriteLocalInstallScript writes the install script into specified path func WriteLocalInstallScript(path string) error { return utils.WriteFile(path, []byte(script), 0755) } var script = `#!/bin/sh case $(uname -s) in Linux|linux) os=linux ;; Darwin|darwin) os=darwin ;; *) os= ;; esac if [ -z "$os" ]; then echo "OS $(uname -s) not supported." 
>&2 exit 1 fi case $(uname -m) in amd64|x86_64) arch=amd64 ;; arm64|aarch64) arch=arm64 ;; *) arch= ;; esac if [ -z "$arch" ]; then echo "Architecture $(uname -m) not supported." >&2 exit 1 fi if [ -z "$TIUP_HOME" ]; then TIUP_HOME=$HOME/.tiup fi bin_dir=$TIUP_HOME/bin mkdir -p "$bin_dir" script_dir=$(cd $(dirname $0) && pwd) install_binary() { tar -zxf "$script_dir/tiup-$os-$arch.tar.gz" -C "$bin_dir" || return 1 # Use the offline root.json cp "$script_dir/root.json" "$bin_dir" || return 1 # Remove old manifests rm -rf $TIUP_HOME/manifests return 0 } check_depends() { pass=0 command -v tar >/dev/null || { echo "Dependency check failed: please install 'tar' before proceeding." pass=1 } return $pass } if ! check_depends; then exit 1 fi if ! install_binary; then echo "Failed to download and/or extract tiup archive." exit 1 fi chmod 755 "$bin_dir/tiup" # set mirror to the local path "$bin_dir/tiup" mirror set ${script_dir} --silent bold=$(tput bold 2>/dev/null) green=$(tput setaf 2 2>/dev/null) cyan=$(tput setaf 6 2>/dev/null) sgr0=$(tput sgr0 2>/dev/null) echo # Reference: https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix shell=$(echo $SHELL | awk 'BEGIN {FS="/";} { print $NF }') echo "Detected shell: ${bold}$shell${sgr0}" if [ -f "${HOME}/.${shell}_profile" ]; then PROFILE=${HOME}/.${shell}_profile elif [ -f "${HOME}/.${shell}_login" ]; then PROFILE=${HOME}/.${shell}_login elif [ -f "${HOME}/.${shell}rc" ]; then PROFILE=${HOME}/.${shell}rc else PROFILE=${HOME}/.profile fi echo "Shell profile: ${bold}$PROFILE${sgr0}" echo echo "${bold}${green}✔ ${sgr0}Installed in ${bold}$bin_dir/tiup${sgr0}" case :$PATH: in *:$bin_dir:*) echo "${bold}${green}✔ ${sgr0}tiup PATH is already set, skip" ;; *) printf '\nexport PATH=%s:$PATH\n' "$bin_dir" >> "$PROFILE" echo "${bold}${green}✔ ${sgr0}Added tiup PATH into ${bold}${shell}${sgr0} profile" ;; esac echo echo "${bold}tiup is installed now${sgr0} 🎉" echo echo Next step: echo echo " 1: To make 
PATH change effective, restart your shell or execute:" echo " ${bold}${cyan}source ${PROFILE}${sgr0}" echo echo " 2: Start a local TiDB for development:" echo " ${bold}${cyan}tiup playground${sgr0}" ` tiup-1.16.3/pkg/cluster/template/scripts/000077500000000000000000000000001505422223000203225ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/template/scripts/alertmanager.go000066400000000000000000000027151505422223000233200ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // AlertManagerScript represent the data to generate AlertManager start script type AlertManagerScript struct { WebListenAddr string WebExternalURL string ClusterPeers []string ClusterListenAddr string DeployDir string LogDir string DataDir string NumaNode string AdditionalArgs []string } // ConfigToFile write config content to specific path func (c *AlertManagerScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_alertmanager.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("AlertManager").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } 
tiup-1.16.3/pkg/cluster/template/scripts/blackbox_exporter.go000066400000000000000000000045011505422223000243660ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // BlackboxExporterScript represent the data to generate BlackboxExporter config type BlackboxExporterScript struct { Port uint64 DeployDir string LogDir string NumaNode string } // NewBlackboxExporterScript returns a BlackboxExporterScript with given arguments func NewBlackboxExporterScript(deployDir, logDir string) *BlackboxExporterScript { return &BlackboxExporterScript{ Port: 9115, DeployDir: deployDir, LogDir: logDir, } } // WithPort set WebPort field of BlackboxExporterScript func (c *BlackboxExporterScript) WithPort(port uint64) *BlackboxExporterScript { c.Port = port return c } // WithNumaNode set NumaNode field of BlackboxExporterScript func (c *BlackboxExporterScript) WithNumaNode(numa string) *BlackboxExporterScript { c.NumaNode = numa return c } // Config generate the config file data. 
func (c *BlackboxExporterScript) Config() ([]byte, error) { fp := path.Join("templates", "scripts", "run_blackbox_exporter.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigToFile write config content to specific path func (c *BlackboxExporterScript) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } // ConfigWithTemplate generate the BlackboxExporter config content by tpl func (c *BlackboxExporterScript) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("BlackboxExporter").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/scripts/cdc.go000066400000000000000000000027531505422223000214110ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // CDCScript represent the data to generate cdc config type CDCScript struct { Addr string AdvertiseAddr string PD string GCTTL int64 TZ string ClusterID string DataDirEnabled bool ConfigFileEnabled bool TLSEnabled bool DeployDir string LogDir string DataDir string NumaNode string } // ConfigToFile write config content to specific file. 
func (c *CDCScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_cdc.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("CDC").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/dashboard.go000066400000000000000000000026271505422223000226070ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // DashboardScript represent the data to generate cdc config type DashboardScript struct { TidbVersion string Host string Port int DeployDir string LogDir string DataDir string NumaNode string TLSEnabled bool PD string } // ConfigToFile write config content to specific file. 
func (s *DashboardScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_tidb-dashboard.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("tidb-dashboard").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, s); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/dm_master.go000066400000000000000000000046741505422223000226370ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "bytes" "errors" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // DMMasterScript represent the data to generate TiDB config type DMMasterScript struct { Name string V1SourcePath string MasterAddr string AdvertiseAddr string PeerURL string AdvertisePeerURL string InitialCluster string DeployDir string DataDir string LogDir string NumaNode string } // ConfigToFile write config content to specific path func (c *DMMasterScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_dm-master.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("dm-master").Parse(string(tpl)) if err != nil { return err } if c.Name == "" { return errors.New("empty name") } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } // DMMasterScaleScript represent the data to generate dm-master config on scaling type DMMasterScaleScript struct { Name string V1SourcePath string MasterAddr string AdvertiseAddr string PeerURL string AdvertisePeerURL string Join string DeployDir string DataDir string LogDir string NumaNode string } // ConfigToFile write config content to specific path func (c *DMMasterScaleScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_dm-master_scale.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("dm-master").Parse(string(tpl)) if err != nil { return err } if c.Name == "" { return errors.New("empty name") } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/dm_worker.go000066400000000000000000000026501505422223000226450ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // DMWorkerScript represent the data to generate TiDB config type DMWorkerScript struct { Name string WorkerAddr string AdvertiseAddr string Join string DeployDir string LogDir string NumaNode string Endpoints []*DMMasterScript IP string Port int } // ConfigToFile write config content to specific path func (c *DMWorkerScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_dm-worker.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("dm-worker").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/drainer.go000066400000000000000000000026061505422223000223010ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // DrainerScript represent the data to generate drainer config type DrainerScript struct { NodeID string Addr string PD string DeployDir string DataDir string LogDir string NumaNode string IP string Port int Endpoints []*PDScript } // ConfigToFile write config content to specific file. func (c *DrainerScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_drainer.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("Drainer").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/grafana.go000066400000000000000000000023511505422223000222510ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // GrafanaScript represent the data to generate Grafana config type GrafanaScript struct { DeployDir string } // ConfigToFile write config content to specific path func (c *GrafanaScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_grafana.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("Grafana").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/monitoring.go000066400000000000000000000027211505422223000230400ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // PrometheusScript represent the data to generate Prometheus config type PrometheusScript struct { Port int WebExternalURL string Retention string EnableNG bool EnablePromAgentMode bool DeployDir string DataDir string LogDir string NumaNode string AdditionalArgs []string } // ConfigToFile write config content to specific path func (c *PrometheusScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_prometheus.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("Prometheus").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/monitoring_test.go000066400000000000000000000062111505422223000240750ustar00rootroot00000000000000// Copyright 2025 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "os" "strings" "testing" ) func TestPrometheusScriptWithAgentMode(t *testing.T) { // Create a temporary file for testing tmpfile, err := os.CreateTemp("", "prometheus_script_test_*.sh") if err != nil { t.Fatalf("Failed to create temp file: %v", err) } defer os.Remove(tmpfile.Name()) defer tmpfile.Close() // Initialize Prometheus script with agent mode enabled script := &PrometheusScript{ Port: 9090, WebExternalURL: "http://localhost:9090", Retention: "30d", EnableNG: false, EnablePromAgentMode: true, DeployDir: "/deploy", LogDir: "/log", DataDir: "/data", NumaNode: "", AdditionalArgs: []string{"--some-additional-arg=value"}, } // Write to the temp file err = script.ConfigToFile(tmpfile.Name()) if err != nil { t.Fatalf("Failed to write config to file: %v", err) } // Read the file content content, err := os.ReadFile(tmpfile.Name()) if err != nil { t.Fatalf("Failed to read file: %v", err) } // Convert to string for easier assertions contentStr := string(content) // Verify agent mode flag is included if !strings.Contains(contentStr, "--enable-feature=agent") { t.Error("Agent mode script should contain the --enable-feature=agent flag") } // Verify storage flags are not present in agent mode if strings.Contains(contentStr, "--storage.tsdb.path") && strings.Contains(contentStr, "--storage.tsdb.retention") { t.Error("Agent mode script should not contain storage flags") } // Now test with agent mode disabled tmpfile2, err := os.CreateTemp("", "prometheus_script_test_normal_*.sh") if err != nil { t.Fatalf("Failed to create second temp file: %v", err) } defer os.Remove(tmpfile2.Name()) defer tmpfile2.Close() // Initialize Prometheus script with agent mode disabled script.EnablePromAgentMode = false // Write to the second temp file err = script.ConfigToFile(tmpfile2.Name()) if err != nil { t.Fatalf("Failed to write normal config to file: %v", err) } // Read the second file content content2, err := os.ReadFile(tmpfile2.Name()) if err != nil { 
t.Fatalf("Failed to read second file: %v", err) } contentStr2 := string(content2) // Verify normal mode doesn't have agent flag if strings.Contains(contentStr2, "--enable-feature=agent") { t.Error("Normal mode script should not contain the --enable-feature=agent flag") } // Verify storage flags are present in normal mode if !strings.Contains(contentStr2, "--storage.tsdb.path") || !strings.Contains(contentStr2, "--storage.tsdb.retention") { t.Error("Normal mode script should contain storage flags") } } tiup-1.16.3/pkg/cluster/template/scripts/node_exporter.go000066400000000000000000000043561505422223000235360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // NodeExporterScript represent the data to generate NodeExporter config type NodeExporterScript struct { Port uint64 DeployDir string LogDir string NumaNode string } // NewNodeExporterScript returns a NodeExporterScript with given arguments func NewNodeExporterScript(deployDir, logDir string) *NodeExporterScript { return &NodeExporterScript{ Port: 9100, DeployDir: deployDir, LogDir: logDir, } } // WithPort set Port field of NodeExporterScript func (c *NodeExporterScript) WithPort(port uint64) *NodeExporterScript { c.Port = port return c } // WithNumaNode set NumaNode field of NodeExporterScript func (c *NodeExporterScript) WithNumaNode(numa string) *NodeExporterScript { c.NumaNode = numa return c } // Config generate the config file data. func (c *NodeExporterScript) Config() ([]byte, error) { fp := path.Join("templates", "scripts", "run_node_exporter.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigToFile write config content to specific path func (c *NodeExporterScript) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } // ConfigWithTemplate generate the NodeExporter config content by tpl func (c *NodeExporterScript) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("NodeExporter").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/scripts/pd.go000066400000000000000000000045111505422223000212550ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "errors" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // PDScript represent the data to generate pd config type PDScript struct { Name string ClientURL string AdvertiseClientURL string PeerURL string AdvertisePeerURL string InitialCluster string DeployDir string DataDir string LogDir string NumaNode string MSMode bool } // ConfigToFile write config content to specific path func (c *PDScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_pd.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("PD").Parse(string(tpl)) if err != nil { return err } if c.Name == "" { return errors.New("empty name") } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } // PDScaleScript represent the data to generate pd config on scaling type PDScaleScript struct { PDScript Join string } // NewPDScaleScript return a new PDScaleScript func NewPDScaleScript(pdScript *PDScript, join string) *PDScaleScript { return &PDScaleScript{PDScript: *pdScript, Join: join} } // ConfigToFile write config content to specific path func (c *PDScaleScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_pd_scale.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("PD").Parse(string(tpl)) if err != nil { return err } if c.Name == "" { return errors.New("empty name") } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err 
!= nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/pdms_test.go000066400000000000000000000041551505422223000226600ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "os" "strings" "testing" "github.com/stretchr/testify/require" ) func TestScheduling(t *testing.T) { assert := require.New(t) conf, err := os.CreateTemp("", "scheduling.conf") assert.Nil(err) defer os.Remove(conf.Name()) cfg := &SchedulingScript{ Name: "scheduling-0", ListenURL: "127.0.0.1", AdvertiseListenURL: "127.0.0.2", BackendEndpoints: "127.0.0.3", DeployDir: "/deploy", DataDir: "/data", LogDir: "/log", } err = cfg.ConfigToFile(conf.Name()) assert.Nil(err) content, err := os.ReadFile(conf.Name()) assert.Nil(err) assert.True(strings.Contains(string(content), "--name")) cfg.Name = "" err = cfg.ConfigToFile(conf.Name()) assert.Nil(err) content, err = os.ReadFile(conf.Name()) assert.Nil(err) assert.False(strings.Contains(string(content), "--name")) } func TestTSO(t *testing.T) { assert := require.New(t) conf, err := os.CreateTemp("", "tso.conf") assert.Nil(err) defer os.Remove(conf.Name()) cfg := &TSOScript{ Name: "tso-0", ListenURL: "127.0.0.1", AdvertiseListenURL: "127.0.0.2", BackendEndpoints: "127.0.0.3", DeployDir: "/deploy", DataDir: "/data", LogDir: "/log", } err = cfg.ConfigToFile(conf.Name()) assert.Nil(err) content, err := os.ReadFile(conf.Name()) assert.Nil(err) assert.True(strings.Contains(string(content), "--name")) 
cfg.Name = "" err = cfg.ConfigToFile(conf.Name()) assert.Nil(err) content, err = os.ReadFile(conf.Name()) assert.Nil(err) assert.False(strings.Contains(string(content), "--name")) } tiup-1.16.3/pkg/cluster/template/scripts/pump.go000066400000000000000000000025461505422223000216410ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // PumpScript represent the data to generate Pump config type PumpScript struct { NodeID string Addr string AdvertiseAddr string PD string DeployDir string DataDir string LogDir string NumaNode string } // ConfigToFile write config content to specific file. func (c *PumpScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_pump.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("Pump").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/scheduling.go000066400000000000000000000026361505422223000230050ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // SchedulingScript represent the data to generate scheduling config type SchedulingScript struct { Name string ListenURL string AdvertiseListenURL string BackendEndpoints string DeployDir string DataDir string LogDir string NumaNode string } // ConfigToFile write config content to specific path func (c *SchedulingScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_scheduling.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("Scheduling").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/scripts.go000066400000000000000000000014311505422223000223370ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "path/filepath" "github.com/pingcap/tiup/embed" ) // GetScript returns a raw config file from embed templates func GetScript(filename string) ([]byte, error) { fp := filepath.Join("templates", "scripts", filename) return embed.ReadTemplate(fp) } tiup-1.16.3/pkg/cluster/template/scripts/tidb.go000066400000000000000000000026211505422223000215740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TiDBScript represent the data to generate TiDB config type TiDBScript struct { Port int StatusPort int ListenHost string AdvertiseAddr string PD string SupportSecboot bool DeployDir string LogDir string NumaNode string NumaCores string } // ConfigToFile write config content to specific path func (c *TiDBScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_tidb.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("TiDB").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/tiflash.go000066400000000000000000000024721505422223000223100ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TiFlashScript represent the data to generate TiFlash config type TiFlashScript struct { RequiredCPUFlags string DeployDir string LogDir string NumaNode string NumaCores string } // ConfigToFile write config content to specific path func (c *TiFlashScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_tiflash.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("TiFlash").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/tikv.go000066400000000000000000000027611505422223000216340ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TiKVScript represent the data to generate TiKV config type TiKVScript struct { Addr string AdvertiseAddr string StatusAddr string SupportAdvertiseStatusAddr bool AdvertiseStatusAddr string PD string DeployDir string DataDir string LogDir string NumaNode string NumaCores string } // ConfigToFile write config content to specific path func (c *TiKVScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_tikv.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("TiKV").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/tikv_cdc.go000066400000000000000000000026361505422223000224460ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TiKVCDCScript represent the data to generate cdc config type TiKVCDCScript struct { Addr string AdvertiseAddr string PD string GCTTL int64 TZ string TLSEnabled bool DeployDir string LogDir string DataDir string NumaNode string } // ConfigToFile write config content to specific file. 
func (c *TiKVCDCScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_tikv-cdc.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("TiKVCDC").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/tiproxy.go000066400000000000000000000023731505422223000223740ustar00rootroot00000000000000// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TiProxyScript represent the data to generate tiproxy config type TiProxyScript struct { DeployDir string NumaNode string } // ConfigToFile write config content to specific file. func (c *TiProxyScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_tiproxy.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("TiProxy").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/scripts/tispark.go000066400000000000000000000060011505422223000223230ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package scripts import ( "bytes" "text/template" "github.com/pingcap/tiup/pkg/utils" ) // TiSparkEnv represent the data to generate TiSpark environment config type TiSparkEnv struct { Host string TiSparkMaster string TiSparkLocalIP string MasterPort int WorkerPort int MasterUIPort int WorkerUIPort int CustomEnvs map[string]string } // NewTiSparkEnv returns a TiSparkConfig func NewTiSparkEnv(host string) *TiSparkEnv { return &TiSparkEnv{Host: host} } // WithMaster sets master field func (c *TiSparkEnv) WithMaster(master string) *TiSparkEnv { c.TiSparkMaster = master return c } // WithCustomEnv sets custom setting fields func (c *TiSparkEnv) WithCustomEnv(m map[string]string) *TiSparkEnv { c.CustomEnvs = m return c } // WithLocalIP sets custom setting fields func (c *TiSparkEnv) WithLocalIP(ip string) *TiSparkEnv { c.TiSparkLocalIP = ip if ip == "0.0.0.0" { c.TiSparkLocalIP = "" // use empty result to fall back to spark's default } return c } // WithMasterPorts sets port for masters func (c *TiSparkEnv) WithMasterPorts(port, ui int) *TiSparkEnv { c.MasterPort = port c.MasterUIPort = ui return c } // WithWorkerPorts sets port for masters func (c *TiSparkEnv) WithWorkerPorts(port, ui int) *TiSparkEnv { c.WorkerPort = port c.WorkerUIPort = ui return c } // Script generate the script file data. 
func (c *TiSparkEnv) Script() ([]byte, error) { tpl, err := GetScript("spark-env.sh.tpl") if err != nil { return nil, err } return c.ScriptWithTemplate(string(tpl)) } // ScriptToFile write script content to specific path func (c *TiSparkEnv) ScriptToFile(file string) error { script, err := c.Script() if err != nil { return err } return utils.WriteFile(file, script, 0755) } // ScriptWithTemplate parses the template file func (c *TiSparkEnv) ScriptWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("TiSpark").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } // SlaveScriptWithTemplate parses the template file func (c *TiSparkEnv) SlaveScriptWithTemplate() ([]byte, error) { tpl, err := GetScript("start_tispark_slave.sh.tpl") if err != nil { return nil, err } tmpl, err := template.New("TiSpark").Parse(string(tpl)) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/scripts/tso.go000066400000000000000000000025641505422223000214650ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package scripts import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TSOScript represent the data to generate tso config type TSOScript struct { Name string ListenURL string AdvertiseListenURL string BackendEndpoints string DeployDir string DataDir string LogDir string NumaNode string } // ConfigToFile write config content to specific path func (c *TSOScript) ConfigToFile(file string) error { fp := path.Join("templates", "scripts", "run_tso.sh.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return err } tmpl, err := template.New("TSO").Parse(string(tpl)) if err != nil { return err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return err } return utils.WriteFile(file, content.Bytes(), 0755) } tiup-1.16.3/pkg/cluster/template/systemd/000077500000000000000000000000001505422223000203235ustar00rootroot00000000000000tiup-1.16.3/pkg/cluster/template/systemd/system.go000066400000000000000000000066571505422223000222140ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package system import ( "bytes" "path" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // Config represent the data to generate systemd config type Config struct { ServiceName string User string MemoryLimit string CPUQuota string IOReadBandwidthMax string IOWriteBandwidthMax string LimitCORE string DeployDir string DisableSendSigkill bool TimeoutStopSec string TimeoutStartSec string GrantCapNetRaw bool // Takes one of no, on-success, on-failure, on-abnormal, on-watchdog, on-abort, or always. // The Template set as always if this is not set. Restart string SystemdMode string } // NewConfig returns a Config with given arguments func NewConfig(service, user, deployDir string) *Config { return &Config{ ServiceName: service, User: user, DeployDir: deployDir, } } // WithMemoryLimit set the MemoryLimit field of Config func (c *Config) WithMemoryLimit(mem string) *Config { c.MemoryLimit = mem return c } // WithCPUQuota set the CPUQuota field of Config func (c *Config) WithCPUQuota(cpu string) *Config { c.CPUQuota = cpu return c } // WithIOReadBandwidthMax set the IOReadBandwidthMax field of Config func (c *Config) WithIOReadBandwidthMax(io string) *Config { c.IOReadBandwidthMax = io return c } // WithIOWriteBandwidthMax set the IOWriteBandwidthMax field of Config func (c *Config) WithIOWriteBandwidthMax(io string) *Config { c.IOWriteBandwidthMax = io return c } // WithLimitCORE set the LimitCORE field of Config func (c *Config) WithLimitCORE(core string) *Config { c.LimitCORE = core return c } // WithSystemdMode set the SystemdMode field of Config func (c *Config) WithSystemdMode(mode string) *Config { c.SystemdMode = mode return c } // WithTimeoutStopSec set the TimeoutStopSec field of Config func (c *Config) WithTimeoutStopSec(sec string) *Config { c.TimeoutStopSec = sec return c } // WithTimeoutStartSec set the TimeoutStartSec field of Config func (c *Config) WithTimeoutStartSec(sec string) *Config { c.TimeoutStartSec = sec return c 
} // ConfigToFile write config content to specific path func (c *Config) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } // Config generate the config file data. func (c *Config) Config() ([]byte, error) { fp := path.Join("templates", "systemd", "system.service.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigWithTemplate generate the system config content by tpl func (c *Config) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("system").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/systemd/tispark.go000066400000000000000000000043141505422223000223310ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package system import ( "bytes" "path" "strings" "text/template" "github.com/pingcap/tiup/embed" "github.com/pingcap/tiup/pkg/utils" ) // TiSparkConfig represent the data to generate systemd config type TiSparkConfig struct { ServiceName string User string DeployDir string JavaHome string // Takes one of no, on-success, on-failure, on-abnormal, on-watchdog, on-abort, or always. // The Template set as always if this is not set. 
Restart string } // NewTiSparkConfig returns a Config with given arguments func NewTiSparkConfig(service, user, deployDir, javaHome string) *TiSparkConfig { if strings.Contains(service, "master") { service = "master" } else if strings.Contains(service, "worker") { service = "slave" } return &TiSparkConfig{ ServiceName: service, User: user, DeployDir: deployDir, JavaHome: javaHome, } } // ConfigToFile write config content to specific path func (c *TiSparkConfig) ConfigToFile(file string) error { config, err := c.Config() if err != nil { return err } return utils.WriteFile(file, config, 0755) } // Config generate the config file data. func (c *TiSparkConfig) Config() ([]byte, error) { fp := path.Join("templates", "systemd", "tispark.service.tpl") tpl, err := embed.ReadTemplate(fp) if err != nil { return nil, err } return c.ConfigWithTemplate(string(tpl)) } // ConfigWithTemplate generate the system config content by tpl func (c *TiSparkConfig) ConfigWithTemplate(tpl string) ([]byte, error) { tmpl, err := template.New("system").Parse(tpl) if err != nil { return nil, err } content := bytes.NewBufferString("") if err := tmpl.Execute(content, c); err != nil { return nil, err } return content.Bytes(), nil } tiup-1.16.3/pkg/cluster/template/template.go000066400000000000000000000013461505422223000210010ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package template // ConfigGenerator is used to generate configuration for component type ConfigGenerator interface { Config() ([]byte, error) ConfigWithTemplate(tpl string) ([]byte, error) ConfigToFile(file string) error } tiup-1.16.3/pkg/crypto/000077500000000000000000000000001505422223000146575ustar00rootroot00000000000000tiup-1.16.3/pkg/crypto/ca.go000066400000000000000000000127271505422223000156020ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package crypto import ( cr "crypto/rand" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "math/big" "os" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/crypto/rand" ) var serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) // CertificateAuthority holds the CA of a cluster type CertificateAuthority struct { ClusterName string Cert *x509.Certificate Key PrivKey } // NewCA generates a new CertificateAuthority object func NewCA(clsName string) (*CertificateAuthority, error) { currTime := time.Now().UTC() // generate a random serial number for the new ca serialNumber, err := cr.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, err } caTemplate := &x509.Certificate{ SerialNumber: serialNumber, // NOTE: not adding cluster name to the cert subject to avoid potential issues // when we implement cluster renaming feature. We may consider add this back // if we find proper way renaming a TLS enabled cluster. 
// Adding the cluster name in cert subject may be helpful to diagnose problem // when a process is trying to connecting a component from another cluster. Subject: pkix.Name{ Organization: []string{pkixOrganization}, OrganizationalUnit: []string{pkixOrganizationalUnit /*, clsName */}, }, NotBefore: currTime, NotAfter: currTime.Add(time.Hour * 24 * 365 * 50), // TODO: support ca cert rotate IsCA: true, // must be true KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, }, BasicConstraintsValid: true, } priv, err := NewKeyPair(KeyTypeRSA, KeySchemeRSASSAPSSSHA256) if err != nil { return nil, err } caBytes, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, priv.Public().Key(), priv.Signer()) if err != nil { return nil, err } caCert, err := x509.ParseCertificate(caBytes) if err != nil { return nil, err } return &CertificateAuthority{ ClusterName: clsName, Cert: caCert, Key: priv, }, nil } // Sign signs a CSR with the CA func (ca *CertificateAuthority) Sign(csrBytes []byte) ([]byte, error) { csr, err := x509.ParseCertificateRequest(csrBytes) if err != nil { return nil, err } if err := csr.CheckSignature(); err != nil { return nil, err } currTime := time.Now().UTC() if !currTime.Before(ca.Cert.NotAfter) { return nil, errors.Errorf("the signer has expired: NotAfter=%v", ca.Cert.NotAfter) } // generate a random serial number for the new cert serialNumber, err := cr.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, err } template := &x509.Certificate{ Signature: csr.Signature, SignatureAlgorithm: csr.SignatureAlgorithm, PublicKey: csr.PublicKey, PublicKeyAlgorithm: csr.PublicKeyAlgorithm, SerialNumber: serialNumber, Issuer: ca.Cert.Issuer, Subject: csr.Subject, DNSNames: csr.DNSNames, IPAddresses: csr.IPAddresses, EmailAddresses: csr.EmailAddresses, URIs: csr.URIs, NotBefore: currTime, NotAfter: currTime.Add(time.Hour * 24 * 365 * 10), 
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, }, Extensions: csr.Extensions, ExtraExtensions: csr.ExtraExtensions, } return x509.CreateCertificate(rand.Reader, template, ca.Cert, csr.PublicKey, ca.Key.Signer()) } // ReadCA reads an existing CA certificate from disk func ReadCA(clsName, certPath, keyPath string) (*CertificateAuthority, error) { // read private key rawKey, err := os.ReadFile(keyPath) if err != nil { return nil, errors.Annotatef(err, "error reading CA private key for %s", clsName) } keyPem, _ := pem.Decode(rawKey) if keyPem == nil { return nil, errors.Errorf("error decoding CA private key for %s", clsName) } var privKey PrivKey switch keyPem.Type { case "RSA PRIVATE KEY": pk, err := x509.ParsePKCS1PrivateKey(keyPem.Bytes) if err != nil { return nil, errors.Annotatef(err, "error decoding CA private key for %s", clsName) } privKey = &RSAPrivKey{key: pk} default: return nil, errors.Errorf("the CA private key type \"%s\" is not supported", keyPem.Type) } // read certificate rawCert, err := os.ReadFile(certPath) if err != nil { return nil, errors.Annotatef(err, "error reading CA certificate for %s", clsName) } certPem, _ := pem.Decode(rawCert) if certPem == nil { return nil, errors.Errorf("error decoding CA certificate for %s", clsName) } if certPem.Type != "CERTIFICATE" { return nil, errors.Errorf("the CA certificate type \"%s\" is not valid", certPem.Type) } cert, err := x509.ParseCertificate(certPem.Bytes) if err != nil { return nil, errors.Annotatef(err, "error decoding CA certificate for %s", clsName) } return &CertificateAuthority{ ClusterName: clsName, Cert: cert, Key: privKey, }, nil } tiup-1.16.3/pkg/crypto/ca_test.go000066400000000000000000000070671505422223000166420ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package crypto import ( "crypto/x509" "fmt" "testing" "slices" "github.com/stretchr/testify/assert" ) func TestNewCA(t *testing.T) { clsName := "testing-ca" ca, err := NewCA(clsName) assert.Nil(t, err) assert.NotEmpty(t, ca.Cert) assert.NotEmpty(t, ca.Key) // check if it's a CA cert assert.True(t, ca.Cert.IsCA) // check for key subject assert.NotEmpty(t, ca.Cert.Subject.Organization) assert.Equal(t, pkixOrganization, ca.Cert.Subject.Organization[0]) assert.NotEmpty(t, ca.Cert.Subject.OrganizationalUnit) assert.Equal(t, pkixOrganizationalUnit, ca.Cert.Subject.OrganizationalUnit[0]) // assert.Equal(t, clsName, ca.Cert.Subject.OrganizationalUnit[1]) // check for key usage assert.Equal(t, x509.KeyUsage(33), ca.Cert.KeyUsage) // x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature // check for extended usage err = func(cert *x509.Certificate) error { for _, usage := range []x509.ExtKeyUsage{ // expected extended key usage list x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, } { if func(a x509.ExtKeyUsage, s []x509.ExtKeyUsage) bool { return slices.Contains(s, a) }(usage, cert.ExtKeyUsage) { continue } return fmt.Errorf("extended key usage %v not found in generated CA cert", usage) } return nil }(ca.Cert) assert.Nil(t, err) } func TestCASign(t *testing.T) { // generate ca ca, err := NewCA("testing-ca") assert.Nil(t, err) // generate cert privKey, err := NewKeyPair(KeyTypeRSA, KeySchemeRSASSAPSSSHA256) assert.Nil(t, err) csr, err := privKey.CSR("tidb", "testing-cn", []string{ "tidb-server", 
"tidb.server.local", }, []string{ "10.0.0.1", "1.2.3.4", "fe80:2333::dead:beef", "2403:5180:5:c37d::", }) assert.Nil(t, err) certBytes, err := ca.Sign(csr) assert.Nil(t, err) cert, err := x509.ParseCertificate(certBytes) assert.Nil(t, err) assert.False(t, cert.IsCA) assert.Equal(t, ca.Cert.Issuer, cert.Issuer) assert.Equal(t, []string{pkixOrganization}, cert.Subject.Organization) assert.Equal(t, []string{pkixOrganizationalUnit, "tidb"}, cert.Subject.OrganizationalUnit) assert.Equal(t, "testing-cn", cert.Subject.CommonName) assert.Equal(t, []string{"tidb-server", "tidb.server.local"}, cert.DNSNames) assert.Equal(t, "10.0.0.1", cert.IPAddresses[0].String()) assert.Equal(t, "1.2.3.4", cert.IPAddresses[1].String()) assert.Equal(t, "fe80:2333::dead:beef", cert.IPAddresses[2].String()) assert.Equal(t, "2403:5180:5:c37d::", cert.IPAddresses[3].String()) // check for key usage assert.Equal(t, x509.KeyUsage(5), cert.KeyUsage) // x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment // check for extended usage err = func(cert *x509.Certificate) error { for _, usage := range []x509.ExtKeyUsage{ // expected extended key usage list x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, } { if func(a x509.ExtKeyUsage, s []x509.ExtKeyUsage) bool { return slices.Contains(s, a) }(usage, cert.ExtKeyUsage) { continue } return fmt.Errorf("extended key usage %v not found in signed cert", usage) } return nil }(cert) assert.Nil(t, err) } tiup-1.16.3/pkg/crypto/keys.go000066400000000000000000000103501505422223000161600ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package crypto import ( "crypto" "crypto/x509" "errors" ) var ( // ErrorKeyUninitialized will be present when key is used before Deserialize called ErrorKeyUninitialized = errors.New("key not initialized, call Deserialize first") // ErrorDeserializeKey means the key format is not valid ErrorDeserializeKey = errors.New("error on deserialize key, check if the key is valid") // ErrorUnsupportedKeyType means we don't supported this type of key ErrorUnsupportedKeyType = errors.New("provided key type not supported") // ErrorUnsupportedKeySchema means we don't support this schema ErrorUnsupportedKeySchema = errors.New("provided schema not supported") ) const ( // KeyTypeRSA represents the RSA type of keys KeyTypeRSA = "rsa" // KeySchemeRSASSAPSSSHA256 represents rsassa-pss-sha256 scheme KeySchemeRSASSAPSSSHA256 = "rsassa-pss-sha256" // strings used for cert subject pkixOrganization = "PingCAP" pkixOrganizationalUnit = "TiUP" // PKCS12Password is a hard-coded password for PKCS#12 file, it is by // intend to use pre-defined string instead of generated every time, // as the encryption of PKCS#12 it self is weak. The key should be // protected by other means. PKCS12Password = "tiup" ) // Serializable represents object that can be serialized and deserialized type Serializable interface { // Translate the key to the format that can be stored Serialize() ([]byte, error) // Deserialize a key from data Deserialize([]byte) error } // PubKey is a public key available to TiUP type PubKey interface { Serializable // Type returns the type of the key, e.g. RSA Type() string // Scheme returns the scheme of signature algorithm, e.g. 
rsassa-pss-sha256 Scheme() string // Key returns the raw public key Key() crypto.PublicKey // VerifySignature check the signature is right VerifySignature(payload []byte, sig string) error } // PrivKey is the private key that provide signature method type PrivKey interface { Serializable // Type returns the type of the key, e.g. RSA Type() string // Scheme returns the scheme of signature algorithm, e.g. rsassa-pss-sha256 Scheme() string // Signature sign a signature with the key for payload Signature(payload []byte) (string, error) // Signer returns the signer of the private key Signer() crypto.Signer // Public returns public key of the PrivKey Public() PubKey // Pem returns the raw private key in PEM format Pem() []byte // CSR creates a new CSR from the private key CSR(role, commonName string, hostList []string, IPList []string) ([]byte, error) // PKCS12 encodes the certificate to a pfxData PKCS12(cert *x509.Certificate, ca *CertificateAuthority) ([]byte, error) } // NewKeyPair return a pair of key func NewKeyPair(keyType, keyScheme string) (PrivKey, error) { // We only support RSA now if keyType != KeyTypeRSA { return nil, ErrorUnsupportedKeyType } // We only support rsassa-pss-sha256 now if keyScheme != KeySchemeRSASSAPSSSHA256 { return nil, ErrorUnsupportedKeySchema } return RSAPair() } // NewPrivKey return PrivKey func NewPrivKey(keyType, keyScheme string, key []byte) (PrivKey, error) { // We only support RSA now if keyType != KeyTypeRSA { return nil, ErrorUnsupportedKeyType } // We only support rsassa-pss-sha256 now if keyScheme != KeySchemeRSASSAPSSSHA256 { return nil, ErrorUnsupportedKeySchema } priv := &RSAPrivKey{} return priv, priv.Deserialize(key) } // NewPubKey return PrivKey func NewPubKey(keyType, keyScheme string, key []byte) (PubKey, error) { // We only support RSA now if keyType != KeyTypeRSA { return nil, ErrorUnsupportedKeyType } // We only support rsassa-pss-sha256 now if keyScheme != KeySchemeRSASSAPSSSHA256 { return nil, 
ErrorUnsupportedKeySchema } pub := &RSAPubKey{} return pub, pub.Deserialize(key) } tiup-1.16.3/pkg/crypto/rand/000077500000000000000000000000001505422223000156035ustar00rootroot00000000000000tiup-1.16.3/pkg/crypto/rand/passwd.go000066400000000000000000000027731505422223000174440ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package rand import ( "github.com/sethvargo/go-password/password" ) // charsets with some in similar shapes removed (e.g., O, o, I, l, etc.) const ( lowerLetters = "abcdefghijkmnpqrstuvwxyz" upperLetters = "ABCDEFGHJKLMNPQRSTUVWXYZ" digits = "0123456789" symbols = "@^*+-_" ) // Password generates a random password func Password(length int) (string, error) { if length < 8 { panic("password length muster be at least 8.") } gi := &password.GeneratorInput{ LowerLetters: lowerLetters, UpperLetters: upperLetters, Digits: digits, Symbols: symbols, Reader: Reader, } g, err := password.NewGenerator(gi) if err != nil { return "", err } // 1/3 of the password are digits and 1/4 of it are symbols numDigits := length / 3 numSymbols := length / 4 // allow repeat if the length is longer than the shortest charset allowRepeat := (numDigits > len(digits) || numSymbols > len(symbols)) return g.Generate(length, numDigits, numSymbols, false, allowRepeat) } tiup-1.16.3/pkg/crypto/rand/passwd_test.go000066400000000000000000000015071505422223000204750ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package rand import ( "testing" ) func TestPasswd(t *testing.T) { for range 100 { l := Intn(64) if l < 8 { // make sure it's greater than 8 l += 8 } t.Logf("generating random password of length %d", l) p, e := Password(l) if e != nil { t.Error(e) } t.Log(p) if len(p) != l { t.Fail() } } } tiup-1.16.3/pkg/crypto/rand/rand.go000066400000000000000000000024171505422223000170620ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package rand import ( "crypto/rand" "io" "math/big" ) // rand provides a simple wrap of "crypto/rand". 
// Reader is a global random number source var Reader io.Reader = &cryptoRandReader{} type cryptoRandReader struct{} func (c *cryptoRandReader) Read(b []byte) (int, error) { return rand.Read(b) } // Int wraps Int63n func Int() int { val := Int63n(int64(int(^uint(0) >> 1))) return int(val) } // Intn wraps Int63n func Intn(n int) int { if n <= 0 { panic("argument to Intn must be positive") } val := Int63n(int64(n)) return int(val) } // Int63n wraps rand.Int func Int63n(n int64) int64 { if n <= 0 { panic("argument to Int63n must be positive") } val, err := rand.Int(rand.Reader, big.NewInt(n)) if err != nil { panic(err) } return val.Int64() } tiup-1.16.3/pkg/crypto/rsa.go000066400000000000000000000124271505422223000160010ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package crypto import ( "crypto" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/base64" "encoding/pem" "net" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/crypto/rand" "software.sslmate.com/src/go-pkcs12" ) // RSAKeyLength define the length of RSA keys const RSAKeyLength = 2048 // RSAPair generate a pair of rsa keys func RSAPair() (*RSAPrivKey, error) { key, err := rsa.GenerateKey(rand.Reader, RSAKeyLength) if err != nil { return nil, err } return &RSAPrivKey{key}, nil } // RSAPubKey represents the public key of RSA type RSAPubKey struct { key *rsa.PublicKey } // Type returns the type of the key, e.g. 
RSA func (k *RSAPubKey) Type() string { return KeyTypeRSA } // Scheme returns the scheme of signature algorithm, e.g. rsassa-pss-sha256 func (k *RSAPubKey) Scheme() string { return KeySchemeRSASSAPSSSHA256 } // Key returns the raw public key func (k *RSAPubKey) Key() crypto.PublicKey { return k.key } // Serialize generate the pem format for a key func (k *RSAPubKey) Serialize() ([]byte, error) { asn1Bytes, err := x509.MarshalPKIXPublicKey(k.key) if err != nil { return nil, err } pemKey := &pem.Block{ Type: "PUBLIC KEY", Bytes: asn1Bytes, } return pem.EncodeToMemory(pemKey), nil } // Deserialize generate a public key from pem format func (k *RSAPubKey) Deserialize(key []byte) error { block, _ := pem.Decode(key) if block == nil { return ErrorDeserializeKey } pubInterface, err := x509.ParsePKIXPublicKey(block.Bytes) if err != nil { return err } k.key = pubInterface.(*rsa.PublicKey) return nil } // VerifySignature check the signature is right func (k *RSAPubKey) VerifySignature(payload []byte, sig string) error { if k.key == nil { return ErrorKeyUninitialized } sha256 := crypto.SHA256.New() _, err := sha256.Write(payload) if err != nil { return errors.AddStack(err) } hashed := sha256.Sum(nil) b64decSig, err := base64.StdEncoding.DecodeString(sig) if err != nil { return err } return rsa.VerifyPSS(k.key, crypto.SHA256, hashed, b64decSig, nil) } // RSAPrivKey represents the private key of RSA type RSAPrivKey struct { key *rsa.PrivateKey } // Type returns the type of the key, e.g. RSA func (k *RSAPrivKey) Type() string { return KeyTypeRSA } // Scheme returns the scheme of signature algorithm, e.g. 
rsassa-pss-sha256 func (k *RSAPrivKey) Scheme() string { return KeySchemeRSASSAPSSSHA256 } // Serialize generate the pem format for a key func (k *RSAPrivKey) Serialize() ([]byte, error) { pemKey := &pem.Block{ Type: "PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k.key), } return pem.EncodeToMemory(pemKey), nil } // Deserialize generate a private key from pem format func (k *RSAPrivKey) Deserialize(key []byte) error { block, _ := pem.Decode(key) if block == nil { return ErrorDeserializeKey } privKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { return err } k.key = privKey return nil } // Signature sign a signature with the key for payload func (k *RSAPrivKey) Signature(payload []byte) (string, error) { if k.key == nil { return "", ErrorKeyUninitialized } sha256 := crypto.SHA256.New() _, err := sha256.Write(payload) if err != nil { return "", errors.AddStack(err) } hashed := sha256.Sum(nil) sig, err := rsa.SignPSS(rand.Reader, k.key, crypto.SHA256, hashed, nil) if err != nil { return "", err } return base64.StdEncoding.EncodeToString(sig), nil } // Public returns public key of the PrivKey func (k *RSAPrivKey) Public() PubKey { return &RSAPubKey{ key: &k.key.PublicKey, } } // Signer returns the signer of the private key func (k *RSAPrivKey) Signer() crypto.Signer { return k.key } // Pem returns the raw private key im PEM format func (k *RSAPrivKey) Pem() []byte { return pem.EncodeToMemory(&pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k.key), }) } // CSR generates a new CSR from given private key func (k *RSAPrivKey) CSR(role, commonName string, hostList, ipList []string) ([]byte, error) { var ipAddrList []net.IP for _, ip := range ipList { ipAddr := net.ParseIP(ip) ipAddrList = append(ipAddrList, ipAddr) } // set CSR attributes csrTemplate := &x509.CertificateRequest{ Subject: pkix.Name{ Organization: []string{pkixOrganization}, OrganizationalUnit: []string{pkixOrganizationalUnit, role}, CommonName: commonName, }, 
DNSNames: hostList, IPAddresses: ipAddrList, } csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, k.key) if err != nil { return nil, err } return csr, nil } // PKCS12 encodes the private and certificate to a PKCS#12 pfxData func (k *RSAPrivKey) PKCS12(cert *x509.Certificate, ca *CertificateAuthority) ([]byte, error) { return pkcs12.Encode( rand.Reader, k.key, cert, []*x509.Certificate{ca.Cert}, PKCS12Password, ) } tiup-1.16.3/pkg/crypto/rsa_test.go000066400000000000000000000042571505422223000170420ustar00rootroot00000000000000//go:debug rsa1024min=0 package crypto import ( "testing" "github.com/stretchr/testify/assert" ) var cases = [][]byte{ []byte(`TiDB is an awesome database`), []byte(`I like coding...`), []byte(`I hate talking...`), []byte(`Junk food is good`), } var ( publicTestKey = []byte(` -----BEGIN PUBLIC KEY----- MIGeMA0GCSqGSIb3DQEBAQUAA4GMADCBiAKBgF6a4nJojBUcNmxWu7nXFBjlUew3 Se2N3Wqj3BVLYwPWOsrGynPHXh1MF3naIenVty+mQlfqfC/RAFtR31ImHQDBOG2Y mQ/gzxHFWUarmR2nNF8DCbjF9D2JOCStisx79sB0LzF0/7nLEeivRv9lgIQZgOG5 Z0QlIzzy3Ymxu5s1AgMBAAE= -----END PUBLIC KEY----- `) privateTestKey = []byte(` -----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgF6a4nJojBUcNmxWu7nXFBjlUew3Se2N3Wqj3BVLYwPWOsrGynPH Xh1MF3naIenVty+mQlfqfC/RAFtR31ImHQDBOG2YmQ/gzxHFWUarmR2nNF8DCbjF 9D2JOCStisx79sB0LzF0/7nLEeivRv9lgIQZgOG5Z0QlIzzy3Ymxu5s1AgMBAAEC gYBBuoCMFoEFBbX2LYh+BKWM6n6xjHRLnN3yAmidTuQ7PTNZwSXVrPWBi2VgHqKj UP3WGEBNzrd7jU0fJVHwRFSvXvTNho5JyWIACpxu7+KQ6X83hxLR9hM4bMIP19Qg qNgIdU2OvAPKmtv4CM8VNTlDeB7HpdZNwJh6BFp6ykidHQJBALmPZpa2oO3PZ31g 9V3Q6KY5kvuSbuGG879y9S/+WnP2tY7VLjPHkyhAcLxEdtsR/SJ+xwYYoeQLOcqE twm91MMCQQCChIfEZP4PnJvyS3Zv85hvC/Bxcb0+2tpMVBbzILsSFppnMs13+kBn qyAF/ugpvKJFLHEFjOW9P+p7eEXv5fCnAkEAoXTlDr5ZyJJuueljlf3wcLIn8j23 vQRvkmW0cc4fZkeEMoPLb8J3iM6JSUdJI9TDLQCiq+tC8enSnyRbH17NgQJAKFkY L5qY//KGMy0o/AruQMYMGsXynw/BFH+aaKbhrgHW0bhe1IxEhMfeKnxXATATahcH CZQ5IXw03N6doEARWQJBAKy+MMXxB7D4CM6qYBCPvzQD/MLFxQCJCrf7r2a3OSHv xszs/Mo/8gc28hoBwrkWBIUjY5leRR2TIZnGzZ1tZZk= -----END RSA 
PRIVATE KEY----- `) ) func TestSignAndVerify(t *testing.T) { priv, err := RSAPair() assert.Nil(t, err) for _, cas := range cases { sig, err := priv.Signature(cas) assert.Nil(t, err) assert.Nil(t, priv.Public().VerifySignature(cas, sig)) } } func TestSeriAndDeseri(t *testing.T) { pub := RSAPubKey{} pri := RSAPrivKey{} _, err := pri.Signature([]byte("foo")) assert.EqualError(t, err, ErrorKeyUninitialized.Error()) assert.EqualError(t, pub.VerifySignature([]byte(`foo`), "sig"), ErrorKeyUninitialized.Error()) assert.Nil(t, pub.Deserialize(publicTestKey)) assert.Nil(t, pri.Deserialize(privateTestKey)) for _, cas := range cases { sig, err := pri.Signature(cas) assert.Nil(t, err) assert.Nil(t, pub.VerifySignature(cas, sig)) } } tiup-1.16.3/pkg/environment/000077500000000000000000000000001505422223000157035ustar00rootroot00000000000000tiup-1.16.3/pkg/environment/debug.go000066400000000000000000000013761505422223000173270ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package environment import ( "os" "github.com/pingcap/tiup/pkg/localdata" ) // Global flags var ( DebugMode = false ) func init() { val := os.Getenv(localdata.EnvNameDebug) DebugMode = (val == "enable" || val == "enabled" || val == "true") } tiup-1.16.3/pkg/environment/env.go000066400000000000000000000214651505422223000170320ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package environment import ( "fmt" "os" "path/filepath" "sort" "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" "golang.org/x/mod/semver" ) var ( // ErrInstallFirst indicates that a component/version is not installed ErrInstallFirst = errors.New("component not installed") ) // Mirror return mirror of tiup. // If it's not defined, it will use "https://tiup-mirrors.pingcap.com/". func Mirror() string { profile := localdata.InitProfile() cfg := profile.Config reset := func(m string) { os.Setenv(repository.EnvMirrors, m) if err := profile.ResetMirror(m, ""); err != nil { fmt.Fprintf(os.Stderr, "WARNING: reset mirror failed, %s\n", err.Error()) } } m := os.Getenv(repository.EnvMirrors) if m != "" { if cfg.Mirror != m { fmt.Fprintf(os.Stderr, `WARNING: both mirror config (%s) and TIUP_MIRRORS (%s) have been set. Setting mirror to TIUP_MIRRORS (%s) `, cfg.Mirror, m, m) reset(m) } return m } else if cfg.Mirror != "" { os.Setenv(repository.EnvMirrors, cfg.Mirror) return cfg.Mirror } return repository.DefaultMirror } // Environment is the user's fundamental configuration including local and remote parts. type Environment struct { // profile represents the TiUP local profile profile *localdata.Profile // repo represents the components repository of TiUP, it can be a // local file system or a HTTP URL v1Repo repository.Repository } // InitEnv creates a new Environment object configured using env vars and defaults. 
func InitEnv(options repository.Options, mOpt repository.MirrorOptions) (*Environment, error) { if env := GlobalEnv(); env != nil { return env, nil } initRepo := time.Now() profile := localdata.InitProfile() // Initialize the repository // Replace the mirror if some sub-commands use different mirror address mirrorAddr := Mirror() mirror := repository.NewMirror(mirrorAddr, mOpt) if err := mirror.Open(); err != nil { return nil, err } var v1repo *repository.V1Repository var err error var local v1manifest.LocalManifests local, err = v1manifest.NewManifests(profile) if err != nil { return nil, errors.Annotatef(err, "initial repository from mirror(%s) failed", mirrorAddr) } v1repo = repository.NewV1Repo(mirror, options, local) zap.L().Debug("Initialize repository finished", zap.Duration("duration", time.Since(initRepo))) return &Environment{profile, v1repo}, nil } // V1Repository returns the initialized v1 repository func (env *Environment) V1Repository() repository.Repository { return env.v1Repo } // Profile returns the profile of local data func (env *Environment) Profile() *localdata.Profile { return env.profile } // Close release resource of env. func (env *Environment) Close() error { // no need for v1manifest return nil } // SetProfile exports for test func (env *Environment) SetProfile(p *localdata.Profile) { env.profile = p } // LocalPath returns the local path absolute path func (env *Environment) LocalPath(path ...string) string { return env.profile.Path(filepath.Join(path...)) } // UpdateComponents updates or installs all components described by specs. 
func (env *Environment) UpdateComponents(specs []string, nightly, force bool) error { var v1specs []repository.ComponentSpec for _, spec := range specs { component, v := ParseCompVersion(spec) if v == "" && nightly { v = utils.NightlyVersionAlias } v1specs = append(v1specs, repository.ComponentSpec{ID: component, Version: v.String(), Force: force}) } return env.v1Repo.UpdateComponents(v1specs) } // SelfUpdate updates TiUP. func (env *Environment) SelfUpdate() error { if err := env.v1Repo.DownloadTiUP(env.LocalPath("bin")); err != nil { return err } // Cover the root.json from tiup.bar.gz return localdata.InitProfile().ResetMirror(Mirror(), "") } // SelectInstalledVersion selects the installed versions and the latest release version // will be chosen if there is an empty version func (env *Environment) SelectInstalledVersion(component string, ver utils.Version) (utils.Version, error) { installed, err := env.Profile().InstalledVersions(component) if err != nil { return ver, err } versions := []string{} for _, v := range installed { vi, err := env.v1Repo.LocalComponentVersion(component, v, true) if errors.Cause(err) == repository.ErrUnknownVersion { continue } if err != nil { return ver, err } if vi.Yanked { continue } versions = append(versions, v) } // Reverse sort: v5.0.0-rc,v5.0.0-nightly-20210305,v4.0.11 sort.Slice(versions, func(i, j int) bool { return semver.Compare(versions[i], versions[j]) > 0 }) errInstallFirst := errors.Annotatef(ErrInstallFirst, "use `tiup install %s` to install component `%s` first", component, component) if !ver.IsEmpty() { errInstallFirst = errors.Annotatef(ErrInstallFirst, "use `tiup install %s:%s` to install specified version", component, ver.String()) } if ver.IsEmpty() || string(ver) == utils.NightlyVersionAlias { var selected utils.Version for _, v := range versions { // only select nightly for nightly if (string(ver) == utils.NightlyVersionAlias) != utils.Version(v).IsNightly() { continue } if semver.Prerelease(v) == "" { return 
utils.Version(v), nil } // select prerelease version when there is only prelease version on local if selected.IsEmpty() { selected = utils.Version(v) } } if !selected.IsEmpty() { return selected, nil } } else { for _, v := range versions { if utils.Version(v) == ver { return ver, nil } } } return ver, errInstallFirst } // DownloadComponentIfMissing downloads the specific version of a component if it is missing func (env *Environment) DownloadComponentIfMissing(component string, ver utils.Version) (utils.Version, error) { var err error if ver.String() == utils.NightlyVersionAlias { if ver, _, err = env.v1Repo.LatestNightlyVersion(component); err != nil { return "", err } } // Use the latest version if user doesn't specify a specific version and // download the latest version if the specific component doesn't be installed // Check whether the specific version exist in local ver, err = env.SelectInstalledVersion(component, ver) needDownload := errors.Cause(err) == ErrInstallFirst if err != nil && !needDownload { return "", err } if needDownload { fmt.Fprintf(os.Stderr, "The component `%s` version %s is not installed; downloading from repository.\n", component, ver.String()) spec := repository.ComponentSpec{ ID: component, Version: string(ver), Force: false, } if err := env.v1Repo.UpdateComponents([]repository.ComponentSpec{spec}); err != nil { return "", err } } if ver.IsEmpty() { return env.SelectInstalledVersion(component, ver) } return ver, nil } // BinaryPath return the installed binary path. 
func (env *Environment) BinaryPath(component string, ver utils.Version) (string, error) { installPath, err := env.profile.ComponentInstalledPath(component, ver) if err != nil { return "", err } return env.v1Repo.BinaryPath(installPath, component, ver.String()) } // Link add soft link to $TIUP_HOME/bin/ func (env *Environment) Link(component string, version utils.Version) error { version, err := env.SelectInstalledVersion(component, version) if err != nil { return err } binPath, err := env.BinaryPath(component, version) if err != nil { return err } target := env.LocalPath("bin", filepath.Base(binPath)) backup := target + ".old" exist := true _, err = os.Stat(target) if err != nil { if !os.IsNotExist(err) { return err } exist = false } if exist { if err := os.Rename(target, backup); err != nil { fmt.Printf("Backup of `%s` to `%s` failed.\n", target, backup) return err } } fmt.Printf("package %s provides these executables: %s\n", component, filepath.Base(binPath)) err = os.Symlink(binPath, target) if err != nil { defer func() { _ = os.Rename(backup, target) }() } else { defer func() { _ = os.Remove(backup) }() } return err } // ParseCompVersion parses component part from [:version] specification func ParseCompVersion(spec string) (string, utils.Version) { if strings.Contains(spec, ":") { parts := strings.SplitN(spec, ":", 2) return parts[0], utils.Version(parts[1]) } return spec, "" } tiup-1.16.3/pkg/environment/history.go000066400000000000000000000136601505422223000177410ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package environment import ( "bufio" "encoding/json" "fmt" "io" "io/fs" "os" "path/filepath" "sort" "strconv" "strings" "time" "github.com/fatih/color" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) const ( // HistoryDir history save path HistoryDir = "history" historyPrefix = "tiup-history-" historySize int64 = 1024 * 64 // history file default size is 64k ) // commandRow type of command history row type historyRow struct { Date time.Time `json:"time"` Command string `json:"command"` Code int `json:"exit_code"` } // historyItem record history row file item type historyItem struct { path string info fs.FileInfo index int } // HistoryRecord record tiup exec cmd func HistoryRecord(env *Environment, command []string, date time.Time, code int) error { if env == nil { return nil } historyPath := env.LocalPath(HistoryDir) if utils.IsNotExist(historyPath) { err := utils.MkdirAll(historyPath, 0755) if err != nil { return err } } h := &historyRow{ Command: strings.Join(HidePassword(command), " "), Date: date, Code: code, } return h.save(historyPath) } // save save commandRow to file func (r *historyRow) save(dir string) error { rBytes, err := json.Marshal(r) if err != nil { return err } historyFile := getLatestHistoryFile(dir) f, err := os.OpenFile(historyFile.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644) if err != nil { return err } defer f.Close() _, err = f.Write(append(rBytes, []byte("\n")...)) return err } // GetHistory get tiup history func (env *Environment) GetHistory(count int, all bool) ([]*historyRow, error) { fList, err := getHistoryFileList(env.LocalPath(HistoryDir)) if 
err != nil { return nil, err } rows := []*historyRow{} for _, f := range fList { rs, err := f.getHistory() if err != nil { return rows, err } if (len(rows)+len(rs)) > count && !all { i := len(rows) + len(rs) - count rows = append(rs[i:], rows...) break } rows = append(rs, rows...) } return rows, nil } // DeleteHistory delete history file func (env *Environment) DeleteHistory(retainDays int, skipConfirm bool) error { if retainDays < 0 { return errors.Errorf("retainDays cannot be less than 0") } // history file before `DelBeforeTime` will be deleted oneDayDuration, _ := time.ParseDuration("-24h") delBeforeTime := time.Now().Add(oneDayDuration * time.Duration(retainDays)) if !skipConfirm { fmt.Printf("History logs before %s will be %s!\n", color.HiYellowString(delBeforeTime.Format("2006-01-02T15:04:05")), color.HiYellowString("deleted"), ) if err := tui.PromptForConfirmOrAbortError("Do you want to continue? [y/N]:"); err != nil { return err } } fList, err := getHistoryFileList(env.LocalPath(HistoryDir)) if err != nil { return err } if len(fList) == 0 { return nil } for _, f := range fList { if f.info.ModTime().Before(delBeforeTime) { err := os.Remove(f.path) if err != nil { return err } continue } } return nil } // getHistory get tiup history execution row func (i *historyItem) getHistory() ([]*historyRow, error) { rows := []*historyRow{} fi, err := os.Open(i.path) if err != nil { return rows, err } defer fi.Close() br := bufio.NewReader(fi) for { a, _, c := br.ReadLine() if c == io.EOF { break } r := &historyRow{} // ignore err := json.Unmarshal(a, r) if err != nil { continue } rows = append(rows, r) } return rows, nil } // getHistoryFileList get the history file list func getHistoryFileList(dir string) ([]historyItem, error) { fileInfos, err := os.ReadDir(dir) if err != nil { return nil, err } hfileList := []historyItem{} for _, fi := range fileInfos { if fi.IsDir() { continue } // another suffix // ex: tiup-history-0.bak i, err := 
strconv.Atoi((strings.TrimPrefix(fi.Name(), historyPrefix))) if err != nil { continue } fInfo, _ := fi.Info() hfileList = append(hfileList, historyItem{ path: filepath.Join(dir, fi.Name()), index: i, info: fInfo, }) } sort.Slice(hfileList, func(i, j int) bool { return hfileList[i].index > hfileList[j].index }) return hfileList, nil } // getLatestHistoryFile get the latest history file, use index 0 if it doesn't exist func getLatestHistoryFile(dir string) (item historyItem) { fileList, err := getHistoryFileList(dir) // start from 0 if len(fileList) == 0 || err != nil { item.index = 0 item.path = filepath.Join(dir, fmt.Sprintf("%s%s", historyPrefix, strconv.Itoa(item.index))) return } latestItem := fileList[0] if latestItem.info.Size() >= historySize { item.index = latestItem.index + 1 item.path = filepath.Join(dir, fmt.Sprintf("%s%s", historyPrefix, strconv.Itoa(item.index))) } else { item = latestItem } return } // HidePassword replace password with ****** func HidePassword(args []string) []string { redactArgs := []string{ // general "-p", // dumpling "--password", // lightning "--tidb-password", } var r []string for i := 0; i < len(args); i++ { arg := args[i] redacted := false for _, ra := range redactArgs { if strings.HasPrefix(arg, ra) && len(arg) > len(ra) { r = append(r, ra+"******") redacted = true break } else if arg == ra && i+1 < len(args) { r = append(r, ra, "******") i++ // skip next word that may be password redacted = true break } } if !redacted { r = append(r, arg) } } return r } tiup-1.16.3/pkg/environment/tiup.go000066400000000000000000000013361505422223000172160ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package environment var _env *Environment // SetGlobalEnv the global env used. func SetGlobalEnv(env *Environment) { _env = env } // GlobalEnv Get the global env used. func GlobalEnv() *Environment { return _env } tiup-1.16.3/pkg/exec/000077500000000000000000000000001505422223000142635ustar00rootroot00000000000000tiup-1.16.3/pkg/exec/run.go000066400000000000000000000163121505422223000154210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package exec import ( "fmt" "os" "os/exec" "os/signal" "path/filepath" "strings" "sync" "syscall" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/environment" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/tui/colorstr" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/version" "golang.org/x/mod/semver" ) // Skip displaying "Starting component ..." message for some commonly used components. 
var skipStartingMessages = map[string]bool{ "playground": true, "cluster": true, } // RunComponent start a component and wait it func RunComponent(env *environment.Environment, tag, spec, binPath string, args []string) error { component, version := environment.ParseCompVersion(spec) if version == "" { cmdCheckUpdate(component, version) } binPath, err := PrepareBinary(component, version, binPath) if err != nil { return err } // Clean data if current instance is a temporary clean := tag == "" && os.Getenv(localdata.EnvNameInstanceDataDir) == "" // p, err := launchComponent(component, version, binPath, tag, args, env) instanceDir := os.Getenv(localdata.EnvNameInstanceDataDir) if instanceDir == "" { // Generate a tag for current instance if the tag doesn't specified if tag == "" { tag = utils.Base62Tag() } instanceDir = env.LocalPath(localdata.DataParentDir, tag) } defer cleanDataDir(clean, instanceDir) params := &PrepareCommandParams{ Component: component, Version: version, BinPath: binPath, Tag: tag, InstanceDir: instanceDir, Args: args, Env: env, } c, err := PrepareCommand(params) if err != nil { return err } if skip, ok := skipStartingMessages[component]; !skip || !ok { colorstr.Fprintf(os.Stderr, "Starting component [bold]%s[reset]: %s\n", component, strings.Join(environment.HidePassword(c.Args), " ")) } err = c.Start() if err != nil { return errors.Annotatef(err, "Failed to start component `%s`", component) } // If the process has been launched, we must save the process info to meta directory if err := saveProcessInfo(params, c); err != nil { fmt.Fprintf(os.Stderr, "Error saving process info %s\n", err.Error()) } sc := make(chan os.Signal, 1) signal.Notify(sc, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) go func() { for s := range sc { sig := s.(syscall.Signal) fmt.Fprintf(os.Stderr, "Got signal %v (Component: %v ; PID: %v)\n", s, component, c.Process.Pid) if sig != syscall.SIGINT { _ = syscall.Kill(c.Process.Pid, sig) } } }() return c.Wait() 
} func cleanDataDir(rm bool, dir string) { if !rm { return } if err := os.RemoveAll(dir); err != nil { fmt.Fprintln(os.Stderr, "clean data directory failed: ", err.Error()) } } // PrepareCommandParams for PrepareCommand. type PrepareCommandParams struct { Component string Version utils.Version BinPath string Tag string InstanceDir string Args []string Env *environment.Environment } // PrepareCommand will download necessary component and returns a *exec.Cmd func PrepareCommand(p *PrepareCommandParams) (*exec.Cmd, error) { env := p.Env profile := env.Profile() binPath := p.BinPath installPath := filepath.Dir(binPath) if err := utils.MkdirAll(p.InstanceDir, 0755); err != nil { return nil, err } sd := env.LocalPath(localdata.StorageParentDir, p.Component) if err := utils.MkdirAll(sd, 0755); err != nil { return nil, err } tiupWd, err := os.Getwd() if err != nil { return nil, err } envs := []string{ fmt.Sprintf("%s=%s", localdata.EnvNameHome, profile.Root()), fmt.Sprintf("%s=%s", localdata.EnvNameUserInputVersion, p.Version.String()), fmt.Sprintf("%s=%s", localdata.EnvNameTiUPVersion, version.NewTiUPVersion().SemVer()), fmt.Sprintf("%s=%s", localdata.EnvNameComponentDataDir, sd), fmt.Sprintf("%s=%s", localdata.EnvNameComponentInstallDir, installPath), // to be removed in TiUP 2.0 fmt.Sprintf("%s=%s", localdata.EnvNameWorkDir, tiupWd), fmt.Sprintf("%s=%s", localdata.EnvTag, p.Tag), fmt.Sprintf("%s=%s", localdata.EnvNameInstanceDataDir, p.InstanceDir), } envs = append(envs, os.Environ()...) // init the command c := exec.Command(binPath, p.Args...) 
c.Env = envs c.Stdin = os.Stdin c.Stdout = os.Stdout c.Stderr = os.Stderr return c, nil } func cmdCheckUpdate(component string, version utils.Version) { const ( slowTimeout = 1 * time.Second // Timeout to display checking message cancelTimeout = 2 * time.Second // Timeout to cancel the check ) // This mutex is used for protecting flag as well as stdout mu := sync.Mutex{} isCheckFinished := false result := make(chan string, 1) go func() { time.Sleep(slowTimeout) mu.Lock() defer mu.Unlock() if !isCheckFinished { colorstr.Fprintf(os.Stderr, "Checking updates for component [bold]%s[reset]... ", component) } }() go func() { time.Sleep(cancelTimeout) result <- colorstr.Sprintf("[yellow]Timedout (after %s)", cancelTimeout) }() go func() { latestV, _, err := environment.GlobalEnv().V1Repository().LatestStableVersion(component, false) if err != nil { result <- "" return } selectVer, _ := environment.GlobalEnv().SelectInstalledVersion(component, version) if semver.Compare(selectVer.String(), latestV.String()) < 0 { result <- colorstr.Sprintf(` [yellow]A new version of [bold]%[1]s[reset][yellow] is available:[reset] [red][bold]%[2]s[reset] -> [green][bold]%[3]s[reset] To update this component: [tiup_command]tiup update %[1]s[reset] To update all components: [tiup_command]tiup update --all[reset] `, component, selectVer.String(), latestV.String()) } else { result <- "" } }() s := <-result mu.Lock() defer mu.Unlock() isCheckFinished = true if len(s) > 0 { fmt.Fprintln(os.Stderr, s) } } // PrepareBinary use given binpath or download from tiup mirror func PrepareBinary(component string, version utils.Version, binPath string) (string, error) { if binPath != "" { tmp, err := filepath.Abs(binPath) if err != nil { return "", errors.Trace(err) } binPath = tmp } else { selectVer, err := environment.GlobalEnv().DownloadComponentIfMissing(component, version) if err != nil { return "", err } binPath, err = environment.GlobalEnv().BinaryPath(component, selectVer) if err != nil { return "", 
err } } return binPath, nil } func saveProcessInfo(p *PrepareCommandParams, c *exec.Cmd) error { info := &localdata.Process{ Component: p.Component, CreatedTime: time.Now().Format(time.RFC3339), Pid: c.Process.Pid, Exec: c.Args[0], Args: environment.HidePassword(c.Args), Dir: p.InstanceDir, Env: c.Env, Cmd: c, } return environment.GlobalEnv().Profile().WriteMetaFile(info.Dir, info) } tiup-1.16.3/pkg/insight/000077500000000000000000000000001505422223000150045ustar00rootroot00000000000000tiup-1.16.3/pkg/insight/chrony.go000066400000000000000000000105521505422223000166400ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
// Use ntpq to get basic info of chrony on the system package insight import ( "bytes" "log" "os/exec" "strconv" "strings" ) // ChronyStat is holding the chrony statistics type ChronyStat struct { ReferenceID string `json:"referenceid,omitempty"` Stratum int `json:"stratum,omitempty"` RefTime string `json:"ref_time,omitempty"` SystemTime string `json:"system_time,omitempty"` LastOffset float64 `json:"last_offset,omitempty"` // millisecond RMSOffset float64 `json:"rms_offset,omitempty"` // millisecond Frequency float64 `json:"frequency,omitempty"` // millisecond ResidualFreq string `json:"residual_freq,omitempty"` Skew string `json:"skew,omitempty"` RootDelay float64 `json:"root_delay,omitempty"` // millisecond RootDispersion float64 `json:"root_dispersion,omitempty"` // millisecond UpdateInterval float64 `json:"update_interval,omitempty"` // millisecond LeapStatus string `json:"leap_status,omitempty"` } //revive:disable:get-return func (cs *ChronyStat) getChronyInfo() { // try common locations first, then search PATH, this could cover some // contitions when PATH is not correctly set on calling `collector` var syncdBinPaths = []string{"/usr/sbin/chronyc", "/usr/bin/chronyc", "chronyc"} var syncd string var err error for _, syncdPath := range syncdBinPaths { if syncd, err = exec.LookPath(syncdPath); err == nil { // use the first found exec break } cs.LeapStatus = err.Error() } // when no `chrony` found, just return if syncd == "" { return } cmd := exec.Command(syncd, "tracking") var out bytes.Buffer cmd.Stdout = &out err = cmd.Run() if err != nil { cs.LeapStatus = "none" return } // set default sync status to none cs.LeapStatus = "none" output := strings.FieldsFunc(out.String(), multiSplit) for _, kv := range output { tmp := strings.Split(strings.TrimSpace(kv), " : ") switch { case strings.HasPrefix(tmp[0], "Reference ID"): cs.ReferenceID = tmp[1] case strings.HasPrefix(tmp[0], "Stratum"): cs.Stratum, err = strconv.Atoi(tmp[1]) if err != nil { log.Fatal(err) } case 
strings.HasPrefix(tmp[0], "Ref time"): cs.RefTime = tmp[1] case strings.HasPrefix(tmp[0], "System time"): cs.SystemTime = tmp[1] case strings.HasPrefix(tmp[0], "Last offset"): cs.LastOffset, err = strconv.ParseFloat(strings.Split(tmp[1], " ")[0], 64) if err != nil { log.Fatal(err) } // second -> millisecond cs.LastOffset *= 1000 case strings.HasPrefix(tmp[0], "RMS offset"): cs.RMSOffset, err = strconv.ParseFloat(strings.Split(tmp[1], " ")[0], 64) if err != nil { log.Fatal(err) } // second -> millisecond cs.RMSOffset *= 1000 case strings.HasPrefix(tmp[0], "Frequency"): cs.Frequency, err = strconv.ParseFloat(strings.Split(tmp[1], " ")[0], 64) if err != nil { log.Fatal(err) } // second -> millisecond cs.Frequency *= 1000 case strings.HasPrefix(tmp[0], "Residual freq"): cs.ResidualFreq = tmp[1] case strings.HasPrefix(tmp[0], "Skew"): cs.Skew = tmp[1] case strings.HasPrefix(tmp[0], "Root delay"): cs.RootDelay, err = strconv.ParseFloat(strings.Split(tmp[1], " ")[0], 64) if err != nil { log.Fatal(err) } // second -> millisecond cs.RootDelay *= 1000 case strings.HasPrefix(tmp[0], "Root dispersion"): cs.RootDispersion, err = strconv.ParseFloat(strings.Split(tmp[1], " ")[0], 64) if err != nil { log.Fatal(err) } // second -> millisecond cs.RootDispersion *= 1000 case strings.HasPrefix(tmp[0], "Update interval"): cs.UpdateInterval, err = strconv.ParseFloat(strings.Split(tmp[1], " ")[0], 64) if err != nil { log.Fatal(err) } cs.UpdateInterval *= 1000 case strings.HasPrefix(tmp[0], "Leap status"): // none, normal, close cs.LeapStatus = strings.ToLower(tmp[1]) default: continue } } } //revive:enable:get-return tiup-1.16.3/pkg/insight/dmesg.go000066400000000000000000000012451505422223000164340ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package insight import ( "github.com/pingcap/tiup/pkg/kmsg" ) func (info *Info) collectDmsg() error { msg, err := kmsg.Read() info.DMesg = msg return err } tiup-1.16.3/pkg/insight/epoll.go000066400000000000000000000022061505422223000164460ustar00rootroot00000000000000// Check if epoll exclusive available on the host // Ported from https://github.com/pingcap/tidb-ansible/blob/v3.1.0/scripts/check/epoll_chk.cc //go:build cgo && linux // +build cgo,linux package insight /* #include */ import "C" import ( "syscall" "golang.org/x/sys/unix" ) // checkEpollExclusive checks if the host system support epoll exclusive mode func checkEpollExclusive() bool { fd, err := syscall.EpollCreate1(syscall.EPOLL_CLOEXEC) if err != nil || fd < 0 { return false } defer syscall.Close(fd) evfd, err := C.eventfd(0, C.EFD_NONBLOCK|C.EFD_CLOEXEC) if err != nil || evfd < 0 { return false } defer syscall.Close(int(evfd)) /* choose events that should cause an error on EPOLLEXCLUSIVE enabled kernels - specifically the combination of EPOLLONESHOT and EPOLLEXCLUSIVE */ ev := syscall.EpollEvent{ Events: unix.EPOLLET | unix.EPOLLIN | unix.EPOLLEXCLUSIVE | unix.EPOLLONESHOT, //Fd: int32(fd), } if err := syscall.EpollCtl(fd, unix.EPOLL_CTL_ADD, int(evfd), &ev); err != nil { if err != syscall.EINVAL { return false } // else true } else { return false } return true } tiup-1.16.3/pkg/insight/epoll_no_cgo.go000066400000000000000000000005771505422223000200030ustar00rootroot00000000000000// Check if epoll exclusive available on the host // Ported from https://github.com/pingcap/tidb-ansible/blob/v3.1.0/scripts/check/epoll_chk.cc //go:build !cgo || !linux // +build !cgo 
!linux package insight // checkEpollExclusive checks if the host system support epoll exclusive mode func checkEpollExclusive() bool { // If CGO is disabled, always report false return false } tiup-1.16.3/pkg/insight/insight.go000066400000000000000000000056331505422223000170070ustar00rootroot00000000000000// Copyright 2018 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package insight import ( "fmt" "runtime" "strings" "time" "github.com/AstroProfundis/sysinfo" "github.com/pingcap/tiup/pkg/kmsg" ) // Meta are information about insight itself type Meta struct { Timestamp time.Time `json:"timestamp"` UPTime float64 `json:"uptime,omitempty"` IdleTime float64 `json:"idle_time,omitempty"` SiVer string `json:"sysinfo_ver"` GitBranch string `json:"git_branch"` GitCommit string `json:"git_commit"` GoVersion string `json:"go_version"` } // Info are information gathered from the system type Info struct { Meta Meta `json:"meta"` SysInfo sysinfo.SysInfo `json:"sysinfo"` NTP TimeStat `json:"ntp"` ChronyStat ChronyStat `json:"chrony"` Partitions []BlockDev `json:"partitions,omitempty"` ProcStats []ProcessStat `json:"proc_stats,omitempty"` EpollExcl bool `json:"epoll_exclusive,omitempty"` SysConfig *SysCfg `json:"system_configs,omitempty"` DMesg []*kmsg.Msg `json:"dmesg,omitempty"` Sockets []Socket `json:"sockets,omitempty"` } // Options sets options for info collection type Options struct { Pid string Proc bool Syscfg bool // collect kernel configs or not Dmesg bool // collect kernel logs or not } // GetInfo collects Info // 
//revive:disable:get-return func (info *Info) GetInfo(opts Options) { var pidList []string if len(opts.Pid) > 0 { pidList = strings.Split(opts.Pid, ",") } info.Meta.getMeta(pidList) if opts.Proc { info.ProcStats = GetProcessStats(pidList) return } info.SysInfo.GetSysInfo() info.NTP.getNTPInfo() info.ChronyStat.getChronyInfo() info.Partitions = GetPartitionStats() switch runtime.GOOS { case "android", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd": info.EpollExcl = checkEpollExclusive() default: info.EpollExcl = false } if opts.Syscfg { info.SysConfig = &SysCfg{} info.SysConfig.getSysCfg() } if opts.Dmesg { _ = info.collectDmsg() } _ = info.collectSockets() } func (meta *Meta) getMeta(pidList []string) { meta.Timestamp = time.Now() if sysUptime, sysIdleTime, err := GetSysUptime(); err == nil { meta.UPTime = sysUptime meta.IdleTime = sysIdleTime } meta.SiVer = sysinfo.Version meta.GitBranch = GitBranch meta.GitCommit = GitCommit meta.GoVersion = fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH) } //revive:enable:get-return tiup-1.16.3/pkg/insight/ntp.go000066400000000000000000000075401505422223000161420ustar00rootroot00000000000000// Copyright © 2018 PingCAP Inc. // // Use of this source code is governed by an MIT-style license that can be found in the LICENSE file. 
// // Use ntpq to get basic info of NTPd on the system package insight import ( "bytes" "log" "os/exec" "strconv" "strings" ) // TimeStat is holding the NTP time statistics type TimeStat struct { Ver string `json:"version,omitempty"` Sync string `json:"sync,omitempty"` Stratum int `json:"stratum,omitempty"` Precision int `json:"precision,omitempty"` Rootdelay float64 `json:"rootdelay,omitempty"` Rootdisp float64 `json:"rootdisp,omitempty"` Refid string `json:"refid,omitempty"` Peer int `json:"peer,omitempty"` TC int `json:"tc,omitempty"` Mintc int `json:"mintc,omitempty"` Offset float64 `json:"offset,omitempty"` Frequency float64 `json:"frequency,omitempty"` Jitter float64 `json:"jitter,omitempty"` ClkJitter float64 `json:"clk_jitter,omitempty"` ClkWander float64 `json:"clk_wander,omitempty"` Status string `json:"status,omitempty"` } //revive:disable:get-return func (ts *TimeStat) getNTPInfo() { // try common locations first, then search PATH, this could cover some // contitions when PATH is not correctly set on calling `collector` var syncdBinPaths = []string{"/usr/sbin/ntpq", "/usr/bin/ntpq", "ntpq"} var syncd string var err error for _, syncdPath := range syncdBinPaths { if syncd, err = exec.LookPath(syncdPath); err == nil { // use the first found exec break } ts.Ver = err.Error() } // when no `ntpq` found, just return if syncd == "" { return } cmd := exec.Command(syncd, "-c rv", "127.0.0.1") var out bytes.Buffer cmd.Stdout = &out err = cmd.Run() if err != nil { log.Fatal(err) } // set default sync status to none ts.Sync = "none" output := strings.FieldsFunc(out.String(), multiSplit) for _, kv := range output { tmp := strings.Split(strings.TrimSpace(kv), "=") switch { case tmp[0] == "version": ts.Ver = strings.Trim(tmp[1], "\"") case tmp[0] == "stratum": ts.Stratum, err = strconv.Atoi(tmp[1]) if err != nil { log.Fatal(err) } case tmp[0] == "precision": ts.Precision, err = strconv.Atoi(tmp[1]) if err != nil { log.Fatal(err) } case tmp[0] == "rootdelay": 
ts.Rootdelay, err = strconv.ParseFloat(tmp[1], 64) if err != nil { log.Fatal(err) } case tmp[0] == "rootdisp": ts.Rootdisp, err = strconv.ParseFloat(tmp[1], 64) if err != nil { log.Fatal(err) } case tmp[0] == "refid": ts.Refid = tmp[1] case tmp[0] == "peer": ts.Peer, err = strconv.Atoi(tmp[1]) if err != nil { log.Fatal(err) } case tmp[0] == "tc": ts.TC, err = strconv.Atoi(tmp[1]) if err != nil { log.Fatal(err) } case tmp[0] == "mintc": ts.Mintc, err = strconv.Atoi(tmp[1]) if err != nil { log.Fatal(err) } case tmp[0] == "offset": ts.Offset, err = strconv.ParseFloat(tmp[1], 64) if err != nil { log.Fatal(err) } case tmp[0] == "frequency": ts.Frequency, err = strconv.ParseFloat(tmp[1], 64) if err != nil { log.Fatal(err) } case tmp[0] == "sys_jitter": ts.Jitter, err = strconv.ParseFloat(tmp[1], 64) if err != nil { log.Fatal(err) } case tmp[0] == "clk_jitter": ts.ClkJitter, err = strconv.ParseFloat(tmp[1], 64) if err != nil { log.Fatal(err) } case tmp[0] == "clk_wander": ts.ClkWander, err = strconv.ParseFloat(tmp[1], 64) if err != nil { log.Fatal(err) } case strings.Contains(tmp[0], "sync"): ts.Sync = tmp[0] case len(tmp) > 2 && strings.Contains(tmp[1], "status"): // sample line of tmp: ["associd", "0 status", "0618 leap_none"] ts.Status = strings.Split(tmp[2], " ")[0] default: continue } } } //revive:enable:get-return func multiSplit(r rune) bool { switch r { case ',': return true case '\n': return true default: return false } } tiup-1.16.3/pkg/insight/partitions.go000066400000000000000000000155511505422223000175360ustar00rootroot00000000000000// get partitions info of the system package insight import ( "os" "path" "strconv" "strings" si "github.com/AstroProfundis/sysinfo" ) // BlockDev is similar to blkDev_cxt in lsblk (from util-linux) // contains metadata of a block device type BlockDev struct { Name string `json:"name,omitempty"` Partition bool `json:"partition,omitempty"` Mount MountInfo `json:"mount"` UUID string `json:"uuid,omitempty"` Sectors uint64 
`json:"sectors,omitempty"` Size uint64 `json:"size,omitempty"` SubDev []BlockDev `json:"subdev,omitempty"` Holder []string `json:"holder_of,omitempty"` Slave []string `json:"slave_of,omitempty"` Rotational string `json:"rotational,omitempty"` } // MountInfo is the metadata of a mounted device type MountInfo struct { MountPoint string `json:"mount_point,omitempty"` FSType string `json:"filesystem,omitempty"` // Mount options used to mount this device Options string `json:"mount_options,omitempty"` } const ( sysBlockPath = "/sys/block" devMapperPath = "/dev/mapper" ) // GetPartitionStats is getting disk partition statistics func GetPartitionStats() []BlockDev { partStats := make([]BlockDev, 0) if dirSysBlk, err := os.Lstat(sysBlockPath); err == nil && dirSysBlk.IsDir() { fi, _ := os.Open(sysBlockPath) blockDevs, _ := fi.Readdir(0) for _, blk := range blockDevs { var blkDev BlockDev if blkDev.getBlockDevice(blk, nil) { partStats = append(partStats, blkDev) } } matchUUIDs(partStats, getUUIDs()) matchMounts(partStats, checkMounts()) } return partStats } func (blkDev *BlockDev) getBlockDevice(blk os.FileInfo, parent os.FileInfo) bool { var fullpath string var dev string if parent != nil { fullpath = path.Join(sysBlockPath, parent.Name(), blk.Name()) dev = fullpath } else { fullpath = path.Join(sysBlockPath, blk.Name()) dev, _ = os.Readlink(fullpath) } if strings.HasPrefix(dev, "../devices/virtual/") && (strings.Contains(dev, "ram") || strings.Contains(dev, "loop")) { return false } // open the dir var fi *os.File if parent != nil { fi, _ = os.Open(dev) } else { fi, _ = os.Open(path.Join(sysBlockPath, dev)) } subFiles, err := fi.Readdir(0) if err != nil { return false } // check for sub devices for _, subFile := range subFiles { // check if this is a partition if subFile.Name() == "partition" { blkDev.Partition = true } // populate subdev if strings.HasPrefix(subFile.Name(), blk.Name()) { var subBlk BlockDev subBlk.getBlockDevice(subFile, blk) blkDev.SubDev = 
append(blkDev.SubDev, subBlk) } } blkDev.Name = blk.Name() blkSec, err := strconv.Atoi(si.SlurpFile(path.Join(fullpath, "size"))) if err == nil { blkDev.Sectors = uint64(blkSec) blkDev.Size = blkDev.Sectors << 9 } slaves, holders := listDeps(blk.Name()) if len(slaves) > 0 { for _, slave := range slaves { blkDev.Slave = append(blkDev.Slave, slave.Name()) } } if len(holders) > 0 { for _, holder := range holders { blkDev.Holder = append(blkDev.Holder, holder.Name()) } } blkDev.Rotational = si.SlurpFile(path.Join(fullpath, "queue/rotational")) return true } // listDeps check and return the dependency relationship of partitions func listDeps(blk string) ([]os.FileInfo, []os.FileInfo) { fiSlaves, _ := os.Open(path.Join(sysBlockPath, blk, "slaves")) fileInfoHolders, _ := os.Open(path.Join(sysBlockPath, blk, "holders")) slaves, _ := fiSlaves.Readdir(0) holders, _ := fileInfoHolders.Readdir(0) return slaves, holders } // getUUIDs get UUIDs for partitions and put them in a map to device names func getUUIDs() map[string]string { sysDiskUUID := "/dev/disk/by-uuid" fi, err := os.Open(sysDiskUUID) if err != nil { return nil } links, err := fi.Readdir(0) if err != nil { return nil } diskByUUID := make(map[string]string) for _, link := range links { if link.IsDir() { continue } blk, err := os.Readlink(path.Join(sysDiskUUID, link.Name())) if err != nil { continue } blkName := strings.TrimPrefix(blk, "../../") diskByUUID[blkName] = link.Name() } return diskByUUID } // matchUUIDs pair UUIDs and their other device information by names func matchUUIDs(devs []BlockDev, diskByUUID map[string]string) { if len(devs) < 1 || diskByUUID == nil { return } // match devs to their UUIDs for i := range devs { devs[i].UUID = diskByUUID[devs[i].Name] // sub devices if len(devs[i].SubDev) < 1 { continue } matchUUIDs(devs[i].SubDev, diskByUUID) } } // checkMounts get meta info of mount points and put them in a map to device names func checkMounts() map[string]MountInfo { raw, err := 
os.ReadFile(GetProcPath("mounts")) if err != nil { return nil } rawLines := strings.Split(string(raw), "\n") mountPoints := make(map[string]MountInfo) for _, line := range rawLines { mountInfo := strings.Split(line, " ") if len(mountInfo) < 6 { continue } var mp MountInfo mp.MountPoint = mountInfo[1] mp.FSType = mountInfo[2] mp.Options = mountInfo[3] devPath := strings.Split(mountInfo[0], "/") if len(devPath) < 1 { continue } devName := devPath[len(devPath)-1:][0] mountPoints[devName] = mp } // check for swap partitions // note: swap file is not supported yet, as virtual block devices // are excluded from final result if swaps, err := os.ReadFile(GetProcPath("swaps")); err == nil { swapLines := strings.Split(string(swaps), "\n") for i, line := range swapLines { // skip table headers and empty line if i == 0 || line == "" { continue } devPath := strings.Split(strings.Fields(line)[0], "/") if len(devPath) < 1 { continue } var mp MountInfo mp.MountPoint = "[SWAP]" mp.FSType = "swap" devName := devPath[len(devPath)-1:][0] mountPoints[devName] = mp } } return mountPoints } // matchMounts pair mount point meta and their other device information by names func matchMounts(devs []BlockDev, mountPoints map[string]MountInfo) { if len(devs) < 1 || mountPoints == nil { return } // read device mapper info // we build results by block device names, but the names in mount info file // are device mapper names, so we need to find the mapping list of them // errors are ignored when reading the dir devMapperNames := make(map[string]string) if dirDevMapper, err := os.Lstat(devMapperPath); err == nil && dirDevMapper.IsDir() { fi, _ := os.Open(devMapperPath) devMappers, _ := fi.Readdir(0) for _, mapper := range devMappers { fullpath := path.Join(devMapperPath, mapper.Name()) dev, _ := os.Readlink(fullpath) devMapperNames[path.Base(dev)] = mapper.Name() } } for i := range devs { // find mount point info of mapped devices devName := devs[i].Name if mapperName, ok := 
devMapperNames[devName]; ok { devName = mapperName } devs[i].Mount = mountPoints[devName] // sub devices if len(devs[i].SubDev) < 1 { continue } matchMounts(devs[i].SubDev, mountPoints) } } tiup-1.16.3/pkg/insight/process.go000066400000000000000000000117171505422223000170200ustar00rootroot00000000000000// process package insight import ( "log" "strconv" "strings" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/process" ) // ProcessStat contains basic info of a running process type ProcessStat struct { Name string `json:"name"` Pid int32 `json:"pid"` Exec string `json:"exec"` Cmdline string `json:"cmd"` Status string `json:"status"` StartTime float64 `json:"start_time"` CPUTimes *cpu.TimesStat `json:"cpu_times"` Memory *process.MemoryInfoStat `json:"memory"` Rlimit []RlimitUsage `json:"resource_limit"` } // RlimitUsage is the resource limit usage of a process type RlimitUsage struct { Resource string `json:"resource"` Soft int64 `json:"soft"` Hard int64 `json:"hard"` Used uint64 `json:"used"` } // GetProcessStats gets processlist statistics func GetProcessStats(pidList []string) []ProcessStat { if pidList != nil { return getProcStatsByPIDList(pidList) } return getProcStatsByName() } func getProcStatsByPIDList(pidList []string) []ProcessStat { stats := make([]ProcessStat, 0) for _, pidStr := range pidList { pidNum, err := strconv.Atoi(pidStr) if err != nil { log.Fatal(err) } proc, err := getProcessByPID(pidNum) if err != nil { log.Fatal(err) } if proc == nil { continue } var stat ProcessStat stat.getProcessStat(proc) stats = append(stats, stat) } return stats } func getProcStatsByName() []ProcessStat { tiServers := []string{"pd-server", "tikv-server", "tidb-server"} stats := make([]ProcessStat, 0) for _, procName := range tiServers { procList, err := getProcessesByName(procName) if err != nil { log.Fatal(err) } if len(procList) < 1 { continue } for _, proc := range procList { var stat ProcessStat stat.getProcessStat(proc) stats = append(stats, stat) } } 
return stats } func getRlimitUsage(proc *process.Process) []RlimitUsage { resources := map[int32]string{ // Resource limit constants are from: // /usr/include/x86_64-linux-gnu/bits/resource.h // from libc6-dev package in Ubuntu 16.10 // Per-process CPU limit, in seconds. 0: "cpu", // Largest file that can be created, in bytes. 1: "fsize", // Maximum size of data segment, in bytes. 2: "data", // Maximum size of stack segment, in bytes. 3: "stack", // Largest core file that can be created, in bytes. 4: "core", // Largest resident set size, in bytes. // This affects swapping; processes that are exceeding their // resident set size will be more likely to have physical memory // taken from them. 5: "rss", // Number of processes. 6: "nproc", // Number of open files. 7: "nofile", // Locked-in-memory address space. 8: "memlock", // Address space limit. 9: "as", // Maximum number of file locks. 10: "locks", // Maximum number of pending signals. 11: "sigpending", // Maximum bytes in POSIX message queues. 12: "msgqueue", // Maximum nice priority allowed to raise to. // Nice levels 19 .. -20 correspond to 0 .. 39 // values of this resource limit. 13: "nice", // Maximum realtime priority allowed for non-priviledged // processes. 14: "rtprio", // Maximum CPU time in µs that a process scheduled under a real-time // scheduling policy may consume without making a blocking system // call before being forcibly descheduled. 
15: "rttime", } result := make([]RlimitUsage, 0) rlimit, _ := proc.RlimitUsage(true) for _, res := range rlimit { var usage RlimitUsage usage.Resource = resources[res.Resource] usage.Soft = int64(res.Soft) usage.Hard = int64(res.Hard) usage.Used = res.Used result = append(result, usage) } return result } //revive:disable:get-return func (proc_stat *ProcessStat) getProcessStat(proc *process.Process) { proc_stat.Pid = proc.Pid proc_stat.Name, _ = proc.Name() proc_stat.Exec, _ = proc.Exe() proc_stat.Cmdline, _ = proc.Cmdline() proc_stat.Status, _ = proc.Status() proc_stat.StartTime, _ = getProcStartTime(proc) proc_stat.CPUTimes, _ = proc.Times() proc_stat.Memory, _ = proc.MemoryInfo() proc_stat.Rlimit = getRlimitUsage(proc) } //revive:enable:get-return func getProcessByPID(pid int) (*process.Process, error) { procList, err := process.Processes() if err != nil || len(procList) < 1 { return nil, err } for _, proc := range procList { // skip when process no longer exist if int(proc.Pid) == pid { return proc, err } } return nil, err } func getProcessesByName(searchName string) ([]*process.Process, error) { procList, err := process.Processes() if err != nil || len(procList) < 1 { return nil, err } procResult := make([]*process.Process, 0) for _, proc := range procList { // skip when process no longer exist procName, _ := proc.Name() // return multiple processes that match the search if strings.Contains(procName, searchName) { procResult = append(procResult, proc) } } return procResult, err } tiup-1.16.3/pkg/insight/process_linux.go000066400000000000000000000010411505422223000202240ustar00rootroot00000000000000//go:build linux // +build linux package insight import ( "log" "os" "strconv" "strings" "github.com/shirou/gopsutil/process" ) func getProcStartTime(proc *process.Process) (float64, error) { statPath := GetProcPath(strconv.Itoa(int(proc.Pid)), "stat") contents, err := os.ReadFile(statPath) if err != nil { log.Fatal(err) return 0, err } fields := 
strings.Fields(string(contents)) if startTime, err := strconv.ParseFloat(fields[21], 64); err == nil { return startTime / float64(process.ClockTicks), err } return 0, err } tiup-1.16.3/pkg/insight/process_others.go000066400000000000000000000002701505422223000203740ustar00rootroot00000000000000//go:build !linux // +build !linux package insight import ( "github.com/shirou/gopsutil/process" ) func getProcStartTime(proc *process.Process) (float64, error) { return 0, nil } tiup-1.16.3/pkg/insight/socket.go000066400000000000000000000017361505422223000166320ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package insight import "github.com/vishvananda/netlink" // Socket is a network socket type Socket struct { Family uint8 `json:"family"` State uint8 `json:"state"` SourceAddr string `json:"source_addr"` SourcePort uint16 `json:"source_port"` DestAddr string `json:"dest_addr"` DestPort uint16 `json:"dest_port"` } func (info *Info) collectSockets() error { sockets, err := GetIPV4Sockets(netlink.TCP_ESTABLISHED) info.Sockets = sockets return err } tiup-1.16.3/pkg/insight/socket_linux.go000066400000000000000000000025641505422223000200510ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. //go:build linux // +build linux package insight import ( "github.com/vishvananda/netlink" "golang.org/x/sys/unix" ) // GetIPV4Sockets is getting sockets from states func GetIPV4Sockets(states ...uint8) ([]Socket, error) { netSockets, err := netlink.SocketDiagTCP(unix.AF_INET) if err != nil { return nil, err } tcpStates := make(map[uint8]bool, len(states)) for _, state := range states { tcpStates[state] = true } sockets := make([]Socket, 0, len(netSockets)) for _, socket := range netSockets { if len(tcpStates) > 0 && !tcpStates[socket.State] { continue } sockets = append(sockets, Socket{ Family: socket.Family, State: socket.State, SourceAddr: socket.ID.Source.String(), SourcePort: socket.ID.SourcePort, DestAddr: socket.ID.Destination.String(), DestPort: socket.ID.DestinationPort, }) } return sockets, nil } tiup-1.16.3/pkg/insight/socket_others.go000066400000000000000000000012651505422223000202130ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
//go:build !linux // +build !linux package insight // GetIPV4Sockets is getting sockets from states func GetIPV4Sockets(states ...uint8) ([]Socket, error) { return nil, nil } tiup-1.16.3/pkg/insight/syscfg.go000066400000000000000000000036331505422223000166360ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package insight import ( "os" "strconv" "strings" sysctl "github.com/lorenzosaino/go-sysctl" ) // SysCfg are extra system configs we collected type SysCfg struct { SecLimit []SecLimitField `json:"sec_limit,omitempty"` SysCtl map[string]string `json:"sysctl,omitempty"` } // SecLimitField is the config field in security limit file type SecLimitField struct { Domain string `json:"domain"` Type string `json:"type"` Item string `json:"item"` Value int `json:"value"` } //revive:disable:get-return func (c *SysCfg) getSysCfg() { c.SysCtl = collectSysctl() c.SecLimit = collectSecLimit() } //revive:enable:get-return func collectSysctl() map[string]string { msg, err := sysctl.GetAll() if err != nil { return nil } return msg } const limitFilePath = "/etc/security/limits.conf" func collectSecLimit() []SecLimitField { result := make([]SecLimitField, 0) data, err := os.ReadFile(limitFilePath) if err != nil { return result } for line := range strings.SplitSeq(string(data), "\n") { line = strings.TrimSpace(line) if !strings.HasPrefix(line, "#") { fields := strings.Fields(line) if len(fields) < 4 { continue } var field SecLimitField field.Domain = fields[0] field.Type = fields[1] field.Item = 
fields[2] v, err := strconv.Atoi(fields[3]) if err != nil { continue } field.Value = v result = append(result, field) } } return result } tiup-1.16.3/pkg/insight/utils.go000066400000000000000000000017331505422223000164770ustar00rootroot00000000000000package insight import ( "os" "strconv" "strings" ) // Version information var ( // GitBranch is initialized during make GitBranch = "Not Provided" // GitCommit is initialized during make GitCommit = "Not Provided" // Proc dir path for Linux procPath = "/proc" ) // GetProcPath is getting the proc path func GetProcPath(paths ...string) string { switch len(paths) { case 0: return procPath default: all := make([]string, len(paths)+1) all[0] = procPath copy(all[1:], paths) return strings.Join(all, "/") } } // GetSysUptime gets the system uptime func GetSysUptime() (float64, float64, error) { contents, err := os.ReadFile(GetProcPath("uptime")) if err != nil { return 0, 0, err } timerCounts := strings.Fields(string(contents)) uptime, err := strconv.ParseFloat(timerCounts[0], 64) if err != nil { return 0, 0, err } idleTime, err := strconv.ParseFloat(timerCounts[1], 64) if err != nil { return 0, 0, err } return uptime, idleTime, err } tiup-1.16.3/pkg/kmsg/000077500000000000000000000000001505422223000143005ustar00rootroot00000000000000tiup-1.16.3/pkg/kmsg/read.go000066400000000000000000000050621505422223000155450ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package kmsg import ( "fmt" "io" "strconv" "strings" "syscall" ) // Read reads all available kernel messages func Read() ([]*Msg, error) { fd, err := syscall.Open(kmsgFile, syscall.O_RDONLY|syscall.O_NONBLOCK, 0) if err != nil { return nil, err } defer syscall.Close(fd) result := make([]*Msg, 0) msgChan, sucChan, errChan := readKMsg(fd) for { select { case err := <-errChan: return nil, err case <-sucChan: // finished return result, nil case msg := <-msgChan: result = append(result, msg) } } } func readKMsg(fd int) (<-chan *Msg, <-chan bool, <-chan error) { msgChan := make(chan *Msg) sucChan := make(chan bool, 1) errChan := make(chan error, 1) go func() { buf := make([]byte, 8192) for { n, err := syscall.Read(fd, buf) if err != nil { if err == io.EOF || err == syscall.EAGAIN { // complete sucChan <- true return } if err == syscall.EPIPE { // read failed, retry continue } errChan <- err return } msg, err := parseMsg(string(buf[:n])) if err != nil { errChan <- err return } msgChan <- msg } }() return msgChan, sucChan, errChan } func parseMsg(msg string) (*Msg, error) { result := &Msg{} fields := strings.Split(msg, ";") if len(fields) < 2 { return nil, fmt.Errorf("incorrect kernel log format") } result.Message = strings.TrimSuffix(fields[1], "\n") prefix := strings.Split(fields[0], ",") if len(prefix) < 3 { return nil, fmt.Errorf("incorrect kernel log prefix format") } priority, err := strconv.Atoi(prefix[0]) if err != nil { return result, fmt.Errorf("incorrect kernel log priority %s", prefix[0]) } result.Facility = decodeFacility(priority) result.Severity = decodeSeverity(priority) seq, err := strconv.Atoi(prefix[1]) if err != nil { return result, fmt.Errorf("incorrect kernel log sequence %s", prefix[1]) } result.Sequence = seq ts, err := strconv.Atoi(prefix[2]) if err != nil { return result, fmt.Errorf("incorrect kernel log timestamp %s", prefix[2]) } result.Timestamp = ts return result, nil } 
tiup-1.16.3/pkg/kmsg/types.go000066400000000000000000000057311505422223000160010ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package kmsg import ( "fmt" ) // Ref: https://www.kernel.org/doc/Documentation/ABI/testing/dev-kmsg // The kmsg lines have prefix of the following format: // | priority | sequence | monotonic timestamp | flag | message // 6 , 339 , 5140900 , - ; NET: Registered protocol family 10 // 30 , 340 , 5690716 , - ; udevd[80]: starting version 181 // where the flag is not necessary for us, so we only parse the prefix // for the first 3 fields: priority, sequence and timestamp // the device to read kernel log from const kmsgFile = "/dev/kmsg" const severityMask = 0x07 const facilityMask = 0xf8 // Severity is part of the log priority type Severity int //revive:disable const ( // From /usr/include/sys/syslog.h. // These are the same on Linux, BSD, and OS X. LOG_EMERG Severity = iota LOG_ALERT LOG_CRIT LOG_ERR LOG_WARNING LOG_NOTICE LOG_INFO LOG_DEBUG ) //revive:enable // String implements the string interface func (p Severity) String() string { return []string{ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug", }[p] } // Facility is part of the log priority type Facility int //revive:disable const ( // From /usr/include/sys/syslog.h. // These are the same up to LOG_FTP on Linux, BSD, and OS X. 
LOG_KERN Facility = iota << 3 LOG_USER LOG_MAIL LOG_DAEMON LOG_AUTH LOG_SYSLOG LOG_LPR LOG_NEWS LOG_UUCP LOG_CRON LOG_AUTHPRIV LOG_FTP _ // unused _ // unused _ // unused _ // unused LOG_LOCAL0 LOG_LOCAL1 LOG_LOCAL2 LOG_LOCAL3 LOG_LOCAL4 LOG_LOCAL5 LOG_LOCAL6 LOG_LOCAL7 ) //revive:enable // String implements the string interface func (p Facility) String() string { return []string{ "kern", "user", "mail", "daemon", "auth", "syslog", "lpr", "news", "uucp", "cron", "authpriv", "ftp", "", "", "", "", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7", }[p] } func decodeSeverity(p int) Severity { return Severity(p) & severityMask } func decodeFacility(p int) Facility { return Facility(p) & facilityMask } // Msg is the type of kernel message type Msg struct { Severity Severity Facility Facility Sequence int // Sequence is the 64 bit message sequence number Timestamp int // Timestamp is the monotonic timestamp in microseconds Message string } // String implements the string interface func (m *Msg) String() string { return fmt.Sprintf("%s:%s: [%.6f] %s", m.Facility, m.Severity, float64(m.Timestamp)/1e6, m.Message) } tiup-1.16.3/pkg/localdata/000077500000000000000000000000001505422223000152635ustar00rootroot00000000000000tiup-1.16.3/pkg/localdata/config.go000066400000000000000000000026621505422223000170650ustar00rootroot00000000000000// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package localdata import ( "os" "path" "github.com/BurntSushi/toml" "github.com/pingcap/tiup/pkg/utils" ) type configBase struct { file string } // TiUPConfig represent the config file of TiUP type TiUPConfig struct { configBase Mirror string `toml:"mirror"` } // InitConfig returns a TiUPConfig struct which can flush config back to disk func InitConfig(root string) (*TiUPConfig, error) { config := TiUPConfig{configBase{path.Join(root, "tiup.toml")}, ""} if utils.IsNotExist(config.file) { return &config, nil } // We can ignore any error at current // If we have more configs in the future, we should check the error if _, err := toml.DecodeFile(config.file, &config); err != nil { return nil, err } return &config, nil } // Flush config to disk func (c *TiUPConfig) Flush() error { f, err := os.OpenFile(c.file, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0664) if err != nil { return err } defer f.Close() return toml.NewEncoder(f).Encode(c) } tiup-1.16.3/pkg/localdata/constant.go000066400000000000000000000107341505422223000174500ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package localdata // DefaultTiUPHome represents the default home directory for this build of tiup // If this is left empty, the default will be thee combination of the running // user's home directory and ProfileDirName var DefaultTiUPHome string // ProfileDirName is the name of the profile directory to be used var ProfileDirName = ".tiup" // Notice: if you try to add a new env name which is notable by the user, shou should // add it to cmd/env.go:envList so that the command `tiup env` will show that env. const ( // ComponentParentDir represent the parent directory of all downloaded components ComponentParentDir = "components" // ManifestParentDir represent the parent directory of all manifests ManifestParentDir = "manifests" // KeyInfoParentDir represent the parent directory of all keys KeyInfoParentDir = "keys" // DefaultPrivateKeyName represents the default private key file stored in ${TIUP_HOME}/keys DefaultPrivateKeyName = "private.json" // DataParentDir represent the parent directory of all running instances DataParentDir = "data" // TelemetryDir represent the parent directory of telemetry info TelemetryDir = "telemetry" // StorageParentDir represent the parent directory of running component StorageParentDir = "storage" // EnvNameInstanceDataDir represents the working directory of specific instance EnvNameInstanceDataDir = "TIUP_INSTANCE_DATA_DIR" // EnvNameComponentDataDir represents the working directory of specific component EnvNameComponentDataDir = "TIUP_COMPONENT_DATA_DIR" // EnvNameComponentInstallDir represents the install directory of specific component EnvNameComponentInstallDir = "TIUP_COMPONENT_INSTALL_DIR" // EnvNameWorkDir represents the work directory of TiUP where user type the command `tiup xxx` EnvNameWorkDir = "TIUP_WORK_DIR" // EnvNameUserInputVersion represents the version user specified when running a component by `tiup component:version` EnvNameUserInputVersion = "TIUP_USER_INPUT_VERSION" // EnvNameTiUPVersion represents the version of 
TiUP itself, not the version of component EnvNameTiUPVersion = "TIUP_VERSION" // EnvNameHome represents the environment name of tiup home directory EnvNameHome = "TIUP_HOME" // EnvNameTelemetryStatus represents the environment name of tiup telemetry status EnvNameTelemetryStatus = "TIUP_TELEMETRY_STATUS" // EnvNameTelemetryUUID represents the environment name of tiup telemetry uuid EnvNameTelemetryUUID = "TIUP_TELEMETRY_UUID" // EnvNameTelemetryEventUUID represents the environment name of tiup telemetry event uuid EnvNameTelemetryEventUUID = "TIUP_TELEMETRY_EVENT_UUID" // EnvNameTelemetrySecret represents the environment name of tiup telemetry secret EnvNameTelemetrySecret = "TIUP_TELEMETRY_SECRET" // EnvTag is the tag of the running component EnvTag = "TIUP_TAG" // EnvNameSSHPassPrompt is the variable name by which user specific the password prompt for sshpass EnvNameSSHPassPrompt = "TIUP_SSHPASS_PROMPT" // EnvNameNativeSSHClient is the variable name by which user can specific use native ssh client or not EnvNameNativeSSHClient = "TIUP_NATIVE_SSH" // EnvNameSSHPath is the variable name by which user can specific the executable ssh binary path EnvNameSSHPath = "TIUP_SSH_PATH" // EnvNameSCPPath is the variable name by which user can specific the executable scp binary path EnvNameSCPPath = "TIUP_SCP_PATH" // EnvNameKeepSourceTarget is the variable name by which user can keep the source target or not EnvNameKeepSourceTarget = "TIUP_KEEP_SOURCE_TARGET" // EnvNameMirrorSyncScript make it possible for user to sync mirror commit to other place (eg. CDN) EnvNameMirrorSyncScript = "TIUP_MIRROR_SYNC_SCRIPT" // EnvNameLogPath is the variable name by which user can write the log files into EnvNameLogPath = "TIUP_LOG_PATH" // EnvNameDebug is the variable name by which user can set tiup runs in debug mode(eg. 
print panic logs) EnvNameDebug = "TIUP_CLUSTER_DEBUG" // MetaFilename represents the process meta file name MetaFilename = "tiup_process_meta" ) tiup-1.16.3/pkg/localdata/profile.go000066400000000000000000000207131505422223000172550ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package localdata import ( "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "net/http" "os" "os/exec" "os/user" "path/filepath" "sort" "strings" "slices" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/mod/semver" ) // Profile represents the `tiup` profile type Profile struct { root string Config *TiUPConfig } // NewProfile returns a new profile instance func NewProfile(root string, config *TiUPConfig) *Profile { return &Profile{root: root, Config: config} } // InitProfile creates a new profile using environment variables and defaults. 
func InitProfile() *Profile { var profileDir string switch { case os.Getenv(EnvNameHome) != "": profileDir = os.Getenv(EnvNameHome) case DefaultTiUPHome != "": profileDir = DefaultTiUPHome default: u, err := user.Current() if err != nil { panic("cannot get current user information: " + err.Error()) } profileDir = filepath.Join(u.HomeDir, ProfileDirName) } cfg, err := InitConfig(profileDir) if err != nil { panic("cannot read config: " + err.Error()) } return NewProfile(profileDir, cfg) } // Path returns a full path which is related to profile root directory func (p *Profile) Path(relpath ...string) string { return filepath.Join(append([]string{p.root}, relpath...)...) } // Root returns the root path of the `tiup` func (p *Profile) Root() string { return p.root } // GetComponentInstalledVersion return the installed version of component. func (p *Profile) GetComponentInstalledVersion(component string, ver utils.Version) (utils.Version, error) { if !ver.IsEmpty() && ver.String() != utils.NightlyVersionAlias { return ver, nil } versions, err := p.InstalledVersions(component) if err != nil { return "", err } // Use the latest version if user doesn't specify a specific version // report an error if the specific component doesn't be installed // Check whether the specific version exist in local if len(versions) == 0 { return "", errors.Errorf("component not installed, please try `tiup install %s` to install it", component) } sort.Slice(versions, func(i, j int) bool { return semver.Compare(versions[i], versions[j]) < 0 }) if ver.String() != utils.NightlyVersionAlias { for i := len(versions); i > 0; i-- { if utils.Version(versions[i-1]).IsNightly() { return utils.Version(versions[i-1]), nil } } return "", errors.Errorf("component(nightly) not installed, please try `tiup install %s:nightly` to install it", component) } return utils.Version(versions[len(versions)-1]), nil } // ComponentInstalledPath returns the path where the component installed func (p *Profile) 
ComponentInstalledPath(component string, version utils.Version) (string, error) { installedVersion, err := p.GetComponentInstalledVersion(component, version) if err != nil { return "", err } return filepath.Join(p.Path(ComponentParentDir), component, installedVersion.String()), nil } func (p *Profile) saveTo(path string, data []byte, perm os.FileMode) error { fullPath := filepath.Join(p.root, path) // create sub directory if needed if err := utils.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { return errors.Trace(err) } return utils.WriteFile(fullPath, data, perm) } // WriteMetaFile writes process meta to instance/MetaFilename. func (p *Profile) WriteMetaFile(instance string, data *Process) error { metaFile := filepath.Join(DataParentDir, instance, MetaFilename) jsonData, err := json.MarshalIndent(data, "", " ") if err != nil { return errors.Trace(err) } return p.saveTo(metaFile, jsonData, 0644) } // readJSON read file and unmarshal to target `data` func (p *Profile) readJSON(path string, data any) error { fullPath := filepath.Join(p.root, path) file, err := os.Open(fullPath) if err != nil { return errors.Trace(err) } defer file.Close() return json.NewDecoder(file).Decode(data) } // ReadMetaFile reads a Process object from dirName/MetaFilename. Returns (nil, nil) if a metafile does not exist. 
func (p *Profile) ReadMetaFile(dirName string) (*Process, error) { metaFile := filepath.Join(DataParentDir, dirName, MetaFilename) // If the path doesn't contain the meta file, which means startup interrupted if utils.IsNotExist(p.Path(metaFile)) { return nil, nil } var process Process err := p.readJSON(metaFile, &process) return &process, err } // InstalledComponents returns the installed components func (p *Profile) InstalledComponents() ([]string, error) { compDir := filepath.Join(p.root, ComponentParentDir) fileInfos, err := os.ReadDir(compDir) if err != nil && os.IsNotExist(err) { return nil, nil } if err != nil { return nil, errors.Trace(err) } var components []string for _, fi := range fileInfos { if !fi.IsDir() { continue } components = append(components, fi.Name()) } sort.Strings(components) return components, nil } // InstalledVersions returns the installed versions of specific component func (p *Profile) InstalledVersions(component string) ([]string, error) { path := filepath.Join(p.root, ComponentParentDir, component) if utils.IsNotExist(path) { return nil, nil } fileInfos, err := os.ReadDir(path) if err != nil { return nil, errors.Trace(err) } var versions []string for _, fi := range fileInfos { if !fi.IsDir() { continue } sub, err := os.ReadDir(filepath.Join(path, fi.Name())) if err != nil || len(sub) < 1 { continue } versions = append(versions, fi.Name()) } return versions, nil } // VersionIsInstalled returns true if exactly version of component is installed. 
func (p *Profile) VersionIsInstalled(component, version string) (bool, error) { installed, err := p.InstalledVersions(component) if err != nil { return false, err } if slices.Contains(installed, version) { return true, nil } return false, nil } // ResetMirror reset root.json and cleanup manifests directory func (p *Profile) ResetMirror(addr, root string) error { // Calculating root.json path shaWriter := sha256.New() if _, err := io.Copy(shaWriter, strings.NewReader(addr)); err != nil { return err } localRoot := p.Path("bin", fmt.Sprintf("%s.root.json", hex.EncodeToString(shaWriter.Sum(nil))[:16])) if root == "" { switch { case utils.IsExist(localRoot): root = localRoot case strings.HasSuffix(addr, "/"): root = addr + "root.json" default: root = addr + "/root.json" } } // Fetch root.json var wc io.ReadCloser if strings.HasPrefix(root, "http") { resp, err := http.Get(root) if err != nil { return err } if resp.StatusCode != http.StatusOK { resp.Body.Close() return errors.Errorf("Fetch remote root.json returns http code %d", resp.StatusCode) } wc = resp.Body } else { file, err := os.Open(root) if err != nil { return err } wc = file } defer wc.Close() f, err := os.OpenFile(p.Path("bin", "root.json"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0664) if err != nil { return err } if _, err := io.Copy(f, wc); err != nil { f.Close() return err } f.Close() // Only cache remote mirror if strings.HasPrefix(addr, "http") && root != localRoot { if strings.HasPrefix(root, "http") && !strings.HasPrefix(root, "https") { fmt.Printf("WARN: Trusting component distribution key via insecure Internet: %s\n", root) fmt.Printf(" To revoke TiUP's trust, remove this file: %s\n", localRoot) } _ = utils.Copy(p.Path("bin", "root.json"), localRoot) } if err := os.RemoveAll(p.Path(ManifestParentDir)); err != nil { return err } p.Config.Mirror = addr return p.Config.Flush() } // Process represents a process as written to a meta file. 
type Process struct { Component string `json:"component"` CreatedTime string `json:"created_time"` Pid int `json:"pid"` // PID of the process Exec string `json:"exec"` // Path to the binary Args []string `json:"args,omitempty"` // Command line arguments Env []string `json:"env,omitempty"` // Environment variables Dir string `json:"dir,omitempty"` // Data directory Cmd *exec.Cmd `json:"-"` } tiup-1.16.3/pkg/localdata/profile_test.go000066400000000000000000000027361505422223000203210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package localdata import ( "os" "path" "testing" "github.com/google/uuid" "github.com/pingcap/tiup/pkg/utils" "github.com/stretchr/testify/require" ) func TestResetMirror(t *testing.T) { uuid := uuid.New().String() root := path.Join("/tmp", uuid) _ = os.Mkdir(root, 0o755) _ = os.Mkdir(path.Join(root, "bin"), 0o755) defer os.RemoveAll(root) cfg, _ := InitConfig(root) profile := NewProfile(root, cfg) require.NoError(t, profile.ResetMirror("https://tiup-mirrors.pingcap.com", "")) require.Error(t, profile.ResetMirror("https://example.com", "")) require.NoError(t, profile.ResetMirror("https://example.com", "https://tiup-mirrors.pingcap.com/root.json")) require.NoError(t, utils.Copy(path.Join(root, "bin"), path.Join(root, "mock-mirror"))) require.NoError(t, profile.ResetMirror(path.Join(root, "mock-mirror"), "")) require.Error(t, profile.ResetMirror(root, "")) require.NoError(t, profile.ResetMirror(root, path.Join(root, "mock-mirror", "root.json"))) } tiup-1.16.3/pkg/logger/000077500000000000000000000000001505422223000146165ustar00rootroot00000000000000tiup-1.16.3/pkg/logger/audit.go000066400000000000000000000034411505422223000162550ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logger import ( "bytes" "github.com/pingcap/tiup/pkg/cluster/audit" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/atomic" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) var auditEnabled atomic.Bool var auditBuffer *bytes.Buffer var auditDir string // EnableAuditLog enables audit log. 
func EnableAuditLog(dir string) { auditDir = dir auditEnabled.Store(true) } // DisableAuditLog disables audit log. func DisableAuditLog() { auditEnabled.Store(false) } func newAuditLogCore() zapcore.Core { auditBuffer = bytes.NewBuffer([]byte{}) encoder := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) return zapcore.NewCore(encoder, zapcore.Lock(zapcore.AddSync(auditBuffer)), zapcore.DebugLevel) } // OutputAuditLogToFileIfEnabled outputs audit log to specified fileSuffix if enabled. func OutputAuditLogToFileIfEnabled(dir, fileSuffix string) error { if !auditEnabled.Load() { return nil } if err := utils.MkdirAll(dir, 0755); err != nil { return err } err := audit.OutputAuditLog(dir, fileSuffix, auditBuffer.Bytes()) if err != nil { return err } if dir == auditDir { auditBuffer.Reset() } return nil } // OutputAuditLogIfEnabled outputs audit log if enabled. func OutputAuditLogIfEnabled() error { return OutputAuditLogToFileIfEnabled(auditDir, "") } tiup-1.16.3/pkg/logger/debug.go000066400000000000000000000036661505422223000162460ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package logger import ( "bytes" "fmt" "os" "path/filepath" "time" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) var debugBuffer *bytes.Buffer func newDebugLogCore() zapcore.Core { debugBuffer = new(bytes.Buffer) encoder := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()) return zapcore.NewCore(encoder, zapcore.Lock(zapcore.AddSync(debugBuffer)), zapcore.DebugLevel) } // OutputDebugLog outputs debug log in the current working directory. func OutputDebugLog(prefix string) { logDir := os.Getenv(localdata.EnvNameLogPath) if logDir == "" { profile := localdata.InitProfile() logDir = profile.Path("logs") } if err := utils.MkdirAll(logDir, 0755); err != nil { _, _ = fmt.Fprintf(os.Stderr, "\nCreate debug logs(%s) directory failed %v.\n", logDir, err) return } // FIXME: Stupid go does not allow writing fraction seconds without a leading dot. fileName := time.Now().Format(fmt.Sprintf("%s-debug-2006-01-02-15-04-05.log", prefix)) filePath := filepath.Join(logDir, fileName) err := utils.WriteFile(filePath, debugBuffer.Bytes(), 0644) if err != nil { _, _ = tui.ColorWarningMsg.Fprint(os.Stderr, "\nWarn: Failed to write error debug log.\n") } else { _, _ = fmt.Fprintf(os.Stderr, "\nVerbose debug logs has been written to %s.\n", tui.ColorKeyword.Sprint(filePath)) } debugBuffer.Reset() } tiup-1.16.3/pkg/logger/logger.go000066400000000000000000000014271505422223000164300ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logger import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" ) // InitGlobalLogger initializes zap global logger. func InitGlobalLogger() { core := zapcore.NewTee( newAuditLogCore(), newDebugLogCore(), ) logger := zap.New(core) zap.ReplaceGlobals(logger) } tiup-1.16.3/pkg/logger/printer/000077500000000000000000000000001505422223000163015ustar00rootroot00000000000000tiup-1.16.3/pkg/logger/printer/log.go000066400000000000000000000060701505422223000174140ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package logprinter import ( "encoding/json" "fmt" "io" "os" "strings" "go.uber.org/zap" ) var ( outputFmt = DisplayModeDefault // global output format of logger stdout io.Writer = os.Stdout stderr io.Writer = os.Stderr ) // DisplayMode control the output format type DisplayMode int // display modes const ( DisplayModeDefault DisplayMode = iota // default is the interactive output DisplayModePlain // plain text DisplayModeJSON // JSON ) func fmtDisplayMode(m string) DisplayMode { var dp DisplayMode switch strings.ToLower(m) { case "json": dp = DisplayModeJSON case "plain", "text": dp = DisplayModePlain default: dp = DisplayModeDefault } return dp } func printLog(w io.Writer, mode DisplayMode, level, format string, args ...any) { switch mode { case DisplayModeJSON: obj := struct { Level string `json:"level"` Msg string `json:"message"` }{ Level: level, Msg: fmt.Sprintf(format, args...), } data, err := json.Marshal(obj) if err != nil { _, _ = fmt.Fprintf(w, "{\"error\":\"%s\"}", err) return } _, _ = fmt.Fprint(w, string(data)+"\n") default: _, _ = fmt.Fprintf(w, format+"\n", args...) } } // SetDisplayMode changes the global output format of logger func SetDisplayMode(m DisplayMode) { outputFmt = m } // GetDisplayMode returns the current global output format func GetDisplayMode() DisplayMode { return outputFmt } // SetDisplayModeFromString changes the global output format of logger func SetDisplayModeFromString(m string) { outputFmt = fmtDisplayMode(m) } // Debugf output the debug message to console func Debugf(format string, args ...any) { zap.L().Debug(fmt.Sprintf(format, args...)) } // Infof output the log message to console // Deprecated: Use zap.L().Info() instead func Infof(format string, args ...any) { zap.L().Info(fmt.Sprintf(format, args...)) printLog(stdout, outputFmt, "info", format, args...) 
} // Warnf output the warning message to console // Deprecated: Use zap.L().Warn() instead func Warnf(format string, args ...any) { zap.L().Warn(fmt.Sprintf(format, args...)) printLog(stderr, outputFmt, "warn", format, args...) } // Errorf output the error message to console // Deprecated: Use zap.L().Error() instead func Errorf(format string, args ...any) { zap.L().Error(fmt.Sprintf(format, args...)) printLog(stderr, outputFmt, "error", format, args...) } // SetStdout redirect stdout to a custom writer func SetStdout(w io.Writer) { stdout = w } // SetStderr redirect stderr to a custom writer func SetStderr(w io.Writer) { stderr = w } tiup-1.16.3/pkg/logger/printer/logger.go000066400000000000000000000051551505422223000201150ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logprinter import ( "fmt" "io" "os" "go.uber.org/zap" ) // ContextKey is key used to store values in context type ContextKey string // ContextKeyLogger is the key used for logger stored in context const ContextKeyLogger ContextKey = "logger" // Logger is a set of functions writing output to custom writters, but still // using the global zap logger as our default config does not writes everything // to a memory buffer. 
// TODO: use also separate zap loggers type Logger struct { outputFmt DisplayMode // output format of logger stdout io.Writer stderr io.Writer } // NewLogger creates a Logger with default settings func NewLogger(m string) *Logger { return &Logger{ stdout: os.Stdout, stderr: os.Stderr, outputFmt: fmtDisplayMode(m), } } // SetStdout redirect stdout to a custom writer func (l *Logger) SetStdout(w io.Writer) { l.stdout = w } // SetStderr redirect stderr to a custom writer func (l *Logger) SetStderr(w io.Writer) { l.stderr = w } // SetDisplayMode changes the output format of logger func (l *Logger) SetDisplayMode(m DisplayMode) { l.outputFmt = m } // SetDisplayModeFromString changes the output format of logger func (l *Logger) SetDisplayModeFromString(m string) { l.outputFmt = fmtDisplayMode(m) } // GetDisplayMode returns the current output format func (l *Logger) GetDisplayMode() DisplayMode { return l.outputFmt } // Debugf output the debug message to console func (l *Logger) Debugf(format string, args ...any) { zap.L().Debug(fmt.Sprintf(format, args...)) } // Infof output the log message to console func (l *Logger) Infof(format string, args ...any) { zap.L().Info(fmt.Sprintf(format, args...)) printLog(l.stdout, l.outputFmt, "info", format, args...) } // Warnf output the warning message to console func (l *Logger) Warnf(format string, args ...any) { zap.L().Warn(fmt.Sprintf(format, args...)) printLog(l.stderr, l.outputFmt, "warn", format, args...) } // Errorf output the error message to console func (l *Logger) Errorf(format string, args ...any) { zap.L().Error(fmt.Sprintf(format, args...)) printLog(l.stderr, l.outputFmt, "error", format, args...) } tiup-1.16.3/pkg/logger/printer/verbose.go000066400000000000000000000020251505422223000202740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package logprinter import ( "fmt" "os" "strings" ) var verbose bool func init() { v := strings.ToLower(os.Getenv("TIUP_VERBOSE")) verbose = v == "1" || v == "enable" } // Verbose logs verbose messages func Verbose(format string, args ...any) { if !verbose { return } fmt.Fprintln(stderr, "Verbose:", fmt.Sprintf(format, args...)) } // Verbose logs verbose messages func (l *Logger) Verbose(format string, args ...any) { if !verbose { return } fmt.Fprintln(l.stderr, "Verbose:", fmt.Sprintf(format, args...)) } tiup-1.16.3/pkg/meta/000077500000000000000000000000001505422223000142655ustar00rootroot00000000000000tiup-1.16.3/pkg/meta/err.go000066400000000000000000000037711505422223000154140ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package meta import ( "fmt" "reflect" ) var ( // ErrValidate is an empty ValidateErr object, useful for type checking ErrValidate = &ValidateErr{} ) // error types const ( TypeConflict = "conflict" TypeMismatch = "mismatch" ) // ValidateErr is the error when meta validation fails with conflicts type ValidateErr struct { Type string // conflict type Target string // conflict Target Value any // conflict Value LHS string // object 1 RHS string // object 2 } // Error implements the error interface func (e *ValidateErr) Error() string { return fmt.Sprintf("%s %s for '%v' between '%s' and '%s'", e.Target, e.Type, e.Value, e.LHS, e.RHS) } // Unwrap implements the error interface func (e *ValidateErr) Unwrap() error { return nil } // Is implements the error interface func (e *ValidateErr) Is(target error) bool { t, ok := target.(*ValidateErr) if !ok { return false } // check for interface Value separately if e.Value != nil && t.Value != nil && (!reflect.ValueOf(e.Value).IsValid() && !reflect.ValueOf(t.Value).IsValid()) { return false } // not supporting non-comparable values for now if e.Value != nil && t.Value != nil && !(reflect.TypeOf(e.Value).Comparable() && reflect.TypeOf(t.Value).Comparable()) { return false } return (e.Type == t.Type || t.Type == "") && (e.Target == t.Target || t.Target == "") && (e.Value == t.Value || t.Value == nil || reflect.ValueOf(t.Value).IsZero()) && (e.LHS == t.LHS || t.LHS == "") && (e.RHS == t.RHS || t.RHS == "") } tiup-1.16.3/pkg/meta/err_test.go000066400000000000000000000053061505422223000164470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package meta import ( "errors" "testing" "github.com/stretchr/testify/require" ) func TestValidateErrIs(t *testing.T) { err0 := &ValidateErr{ Type: "dummy", Target: "test", LHS: "LHS", RHS: "RHS", Value: 1, } // identical errors are equal require.True(t, errors.Is(err0, err0)) require.True(t, errors.Is(ErrValidate, ErrValidate)) require.True(t, errors.Is(ErrValidate, &ValidateErr{})) require.True(t, errors.Is(&ValidateErr{}, ErrValidate)) // not equal for different error types require.False(t, errors.Is(err0, errors.New(""))) // default Value matches any error require.True(t, errors.Is(err0, ErrValidate)) // error with values are not matching default ones require.False(t, errors.Is(ErrValidate, err0)) err1 := &ValidateErr{ Type: TypeConflict, Target: "test", LHS: "LHS", RHS: "RHS", Value: 2, } require.True(t, errors.Is(err1, ErrValidate)) // errors with different values are not equal require.False(t, errors.Is(err0, err1)) require.False(t, errors.Is(err1, err0)) // errors with different types are not equal err0.Value = 2 require.False(t, errors.Is(err0, err1)) require.False(t, errors.Is(err1, err0)) err2 := &ValidateErr{ Type: TypeMismatch, Target: "test", LHS: "LHS", RHS: "RHS", Value: map[string]any{ "key1": 1, "key2": "2", }, } require.True(t, errors.Is(err2, ErrValidate)) require.False(t, errors.Is(err1, err2)) require.False(t, errors.Is(err2, err1)) err3 := &ValidateErr{ Type: TypeMismatch, Target: "test", LHS: "LHS", RHS: "RHS", Value: []float64{1.0, 2.0}, } require.True(t, errors.Is(err3, ErrValidate)) // different values are not equal require.False(t, errors.Is(err2, err3)) require.False(t, errors.Is(err3, err2)) err4 := 
&ValidateErr{ Type: TypeMismatch, Target: "test", LHS: "LHS", RHS: "RHS", Value: nil, } require.True(t, errors.Is(err4, ErrValidate)) // nil Value matches any error if other fields are with the same values require.True(t, errors.Is(err3, err4)) require.False(t, errors.Is(err4, err3)) err4.Value = 0 require.True(t, errors.Is(err4, ErrValidate)) require.False(t, errors.Is(err3, err4)) require.False(t, errors.Is(err4, err3)) } tiup-1.16.3/pkg/meta/paths.go000066400000000000000000000016151505422223000157360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package meta import ( "fmt" ) // DirPaths stores the paths needed for component to put files type DirPaths struct { Deploy string Data []string Log string Cache string } // String implements the fmt.Stringer interface func (p DirPaths) String() string { return fmt.Sprintf( "deploy_dir=%s, data_dir=%v, log_dir=%s, cache_dir=%s", p.Deploy, p.Data, p.Log, p.Cache, ) } tiup-1.16.3/pkg/meta/resource_ctrl.go000066400000000000000000000025731505422223000174760ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package meta // ResourceControl is used to control the system resource // See: https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html type ResourceControl struct { MemoryLimit string `yaml:"memory_limit,omitempty" validate:"memory_limit:editable"` CPUQuota string `yaml:"cpu_quota,omitempty" validate:"cpu_quota:editable"` IOReadBandwidthMax string `yaml:"io_read_bandwidth_max,omitempty" validate:"io_read_bandwidth_max:editable"` IOWriteBandwidthMax string `yaml:"io_write_bandwidth_max,omitempty" validate:"io_write_bandwidth_max:editable"` LimitCORE string `yaml:"limit_core,omitempty" validate:"limit_core:editable"` TimeoutStopSec string `yaml:"timeout_stop_sec,omitempty" validate:"timeout_stop_sec:editable"` TimeoutStartSec string `yaml:"timeout_start_sec,omitempty" validate:"timeout_start_sec:editable"` } tiup-1.16.3/pkg/proxy/000077500000000000000000000000001505422223000145205ustar00rootroot00000000000000tiup-1.16.3/pkg/proxy/http_proxy.go000066400000000000000000000141651505422223000172760ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package proxy import ( "context" "io" "net" "net/http" "strconv" "sync" "time" "github.com/appleboy/easyssh-proxy" perrs "github.com/pingcap/errors" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "go.uber.org/zap" "golang.org/x/crypto/ssh" ) // HTTPProxy stands for a http proxy based on SSH connection type HTTPProxy struct { cli *ssh.Client config *easyssh.MakeConfig l sync.RWMutex tr *http.Transport logger *logprinter.Logger } // NewHTTPProxy creates and initializes a new http proxy func NewHTTPProxy(host string, port int, user, password, keyFile, passphrase string, logger *logprinter.Logger, ) *HTTPProxy { p := &HTTPProxy{ config: &easyssh.MakeConfig{ Server: host, Port: strconv.Itoa(port), User: user, Timeout: 10 * time.Second, }, logger: logger, } if len(keyFile) > 0 { p.config.KeyPath = keyFile p.config.Passphrase = passphrase } else if len(password) > 0 { p.config.Password = password } dial := func(ctx context.Context, network, addr string) (net.Conn, error) { p.l.RLock() cli := p.cli p.l.RUnlock() // reuse the old client if dial success if cli != nil { c, err := cli.Dial(network, addr) if err == nil { return c, nil } } // create a new ssh client // timeout is implemented inside easyssh, don't need to repeat the implementation _, cli, err := p.config.Connect() if err != nil { return nil, perrs.Annotate(err, "connect to ssh proxy") } p.l.Lock() p.cli = cli p.l.Unlock() return cli.Dial(network, addr) } p.tr = &http.Transport{DialContext: dial} return p } // ServeHTTP implements http.Handler func (s *HTTPProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.Method { case http.MethodConnect: s.connect(w, r) default: r.RequestURI = "" removeHopHeaders(r.Header) s.serve(w, r) } } // connect handles the CONNECT request // Data flow: // 1. Receive CONNECT request from the client // 2. Dial the remote server(the one client want to conenct) // 3. Send 200 OK to client if the connection is established // 4. 
Exchange data between client and server func (s *HTTPProxy) connect(w http.ResponseWriter, r *http.Request) { // Use Hijacker to get the underlying connection hij, ok := w.(http.Hijacker) if !ok { http.Error(w, "Server does not support Hijacker", http.StatusInternalServerError) return } // connect the remote client directly dst, err := s.tr.DialContext(context.Background(), "tcp", r.URL.Host) if err != nil { if nerr, ok := err.(net.Error); ok && nerr.Timeout() { zap.L().Debug("CONNECT roundtrip proxy timeout") return } zap.L().Debug("CONNECT roundtrip proxy", zap.String("error", err.Error())) http.Error(w, err.Error(), http.StatusInternalServerError) return } defer dst.Close() src, _, err := hij.Hijack() if err != nil { zap.L().Debug("CONNECT hijack", zap.String("error", err.Error())) http.Error(w, err.Error(), http.StatusInternalServerError) return } defer src.Close() // Once connected successfully, return OK _, _ = src.Write([]byte("HTTP/1.1 200 OK\r\n\r\n")) // Proxy is no need to know anything, just exchange data between the client // the the remote server. var wg sync.WaitGroup copyAndWait := func(dst, src net.Conn) { defer wg.Done() _, err := io.Copy(dst, src) if err != nil { zap.L().Error("CONNECT copy response", zap.Any("src", src), zap.Any("dst", dst)) } if tcpConn, ok := dst.(interface{ CloseWrite() error }); ok { _ = tcpConn.CloseWrite() } } wg.Add(2) go copyAndWait(dst, src) // client to remote go copyAndWait(src, dst) // remote to client wg.Wait() } // serve handles the original http request // Data flow: // 1. Receive request R1 from client // 2. Re-post request R1 to remote server(the one client want to connect) // 3. Receive response P1 from remote server // 4. Send response P1 to client func (s *HTTPProxy) serve(w http.ResponseWriter, r *http.Request) { // Client.Do is different from DefaultTransport.RoundTrip ... // Client.Do will change some part of request as a new request of the server. 
// The underlying RoundTrip never changes anything of the request. resp, err := s.tr.RoundTrip(r) if err != nil { if nerr, ok := err.(net.Error); ok && nerr.Timeout() { zap.L().Debug("PROXY roundtrip proxy timeout") return } zap.L().Debug("PROXY roundtrip proxy", zap.String("error", err.Error())) http.Error(w, err.Error(), http.StatusInternalServerError) return } defer resp.Body.Close() // please prepare header first and write them copyHeaders(w, resp) w.WriteHeader(resp.StatusCode) _, err = io.Copy(w, resp.Body) if err != nil { zap.L().Error("PROXY copy response", zap.String("error", err.Error())) http.Error(w, err.Error(), http.StatusInternalServerError) } } // Hop-by-hop headers. These are removed when sent to the backend. // http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html var hopHeaders = []string{ // If no Accept-Encoding header exists, Transport will add the headers it can accept // and would wrap the response body with the relevant reader. "Accept-Encoding", "Connection", "Keep-Alive", "Proxy-Authenticate", "Proxy-Authorization", "Proxy-Connection", "Te", // canonicalized version of "TE" "Trailers", "Transfer-Encoding", "Upgrade", } // removeHopHeaders removes the hop-by-hop headers func removeHopHeaders(h http.Header) { for _, k := range hopHeaders { h.Del(k) } } // copy and overwrite headers from r to w func copyHeaders(w http.ResponseWriter, r *http.Response) { // copy headers dst, src := w.Header(), r.Header for k := range dst { dst.Del(k) } for k, vs := range src { for _, v := range vs { dst.Add(k, v) } } } tiup-1.16.3/pkg/proxy/proxy.go000066400000000000000000000046661505422223000162440ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "context" "fmt" "net/http" "os" "sync/atomic" "github.com/fatih/color" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/tui" "github.com/pingcap/tiup/pkg/utils" ) var ( httpProxy *http.Server tcpProxy atomic.Value ) // MaybeStartProxy maybe starts an inner http/tcp proxies func MaybeStartProxy( host string, port int, user string, usePass bool, identity string, logger *logprinter.Logger, ) error { if len(host) == 0 { return nil } sshProps, err := tui.ReadIdentityFileOrPassword(identity, usePass) if err != nil { return err } httpPort := utils.MustGetFreePort("127.0.0.1", 12345, 0) addr := fmt.Sprintf("127.0.0.1:%d", httpPort) // TODO: Using environment variables to share data may not be a good idea os.Setenv("TIUP_INNER_HTTP_PROXY", "http://"+addr) httpProxy = &http.Server{ Addr: addr, Handler: NewHTTPProxy( host, port, user, sshProps.Password, sshProps.IdentityFile, sshProps.IdentityFilePassphrase, logger, ), } logger.Infof("%s", color.HiGreenString("Start HTTP inner proxy %s", httpProxy.Addr)) go func() { if err := httpProxy.ListenAndServe(); err != nil && err != http.ErrServerClosed { logger.Errorf("Failed to listen HTTP proxy: %v", err) } }() p := NewTCPProxy( host, port, user, sshProps.Password, sshProps.IdentityFile, sshProps.IdentityFilePassphrase, logger, ) tcpProxy.Store(p) logger.Infof("%s", color.HiGreenString("Start TCP inner proxy %s", p.endpoint)) return nil } // MaybeStopProxy stops the http/tcp proxies if it has been started before func MaybeStopProxy() { if httpProxy != nil { _ = httpProxy.Shutdown(context.Background()) os.Unsetenv("TIUP_INNER_HTTP_PROXY") } 
if p := tcpProxy.Load(); p != nil { _ = p.(*TCPProxy).Stop() } } // GetTCPProxy returns the tcp proxy func GetTCPProxy() *TCPProxy { p := tcpProxy.Load() if p != nil { return p.(*TCPProxy) } return nil } tiup-1.16.3/pkg/proxy/tcp_proxy.go000066400000000000000000000102251505422223000170760ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "fmt" "io" "net" "strconv" "sync" "sync/atomic" "time" "github.com/appleboy/easyssh-proxy" perrs "github.com/pingcap/errors" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/utils" "go.uber.org/zap" "golang.org/x/crypto/ssh" ) // TCPProxy represents a simple TCP proxy // unlike HTTP proxies, TCP proxies are point-to-point type TCPProxy struct { l sync.RWMutex listener net.Listener cli *ssh.Client config *easyssh.MakeConfig closed int32 endpoint string logger *logprinter.Logger } // NewTCPProxy starts a 1to1 TCP proxy func NewTCPProxy( host string, port int, user, password, keyFile, passphrase string, logger *logprinter.Logger, ) *TCPProxy { p := &TCPProxy{ config: &easyssh.MakeConfig{ Server: host, Port: strconv.Itoa(port), User: user, Timeout: 10 * time.Second, }, logger: logger, } if len(keyFile) > 0 { p.config.KeyPath = keyFile p.config.Passphrase = passphrase } else if len(password) > 0 { p.config.Password = password } port = utils.MustGetFreePort("127.0.0.1", 22345, 0) p.endpoint = fmt.Sprintf("127.0.0.1:%d", port) listener, err := net.Listen("tcp", p.endpoint) if err != nil { 
logger.Errorf("net.Listen error: %v", err) return nil } p.listener = listener return p } // GetEndpoints returns the endpoint list func (p *TCPProxy) GetEndpoints() []string { return []string{p.endpoint} } // Stop stops the tcp proxy func (p *TCPProxy) Stop() error { atomic.StoreInt32(&p.closed, 1) return p.listener.Close() } // Run runs proxy all traffic to upstream func (p *TCPProxy) Run(upstream []string) chan struct{} { closeC := make(chan struct{}) go func() { FOR_LOOP: for { select { case <-closeC: return default: localConn, err := p.listener.Accept() if err != nil { if atomic.LoadInt32(&p.closed) == 1 { break FOR_LOOP } p.logger.Errorf("tcp proxy accept error: %v", err) continue FOR_LOOP } go p.forward(localConn, upstream) } } }() return closeC } // Close closes a proxy func (p *TCPProxy) Close(c chan struct{}) { close(c) } func (p *TCPProxy) getConn() (*ssh.Client, error) { p.l.RLock() cli := p.cli p.l.RUnlock() // reuse the old client if dial success if cli != nil { return cli, nil } // create a new ssh client _, cli, err := p.config.Connect() if err != nil { return nil, perrs.Annotate(err, "connect to ssh proxy") } p.l.Lock() p.cli = cli p.l.Unlock() return cli, nil } func (p *TCPProxy) forward(localConn io.ReadWriter, endpoints []string) { cli, err := p.getConn() if err != nil { zap.L().Error("Failed to get ssh client", zap.String("error", err.Error())) return } var remoteConn net.Conn OUTER_LOOP: for _, endpoint := range endpoints { errC := make(chan error, 1) go func() { var err error remoteConn, err = cli.Dial("tcp", endpoint) if err != nil { zap.L().Error("Failed to connect endpoint", zap.String("error", err.Error())) } errC <- err }() select { case err := <-errC: if err == nil { break OUTER_LOOP } case <-time.After(5 * time.Second): zap.L().Debug("Connect to endpoint timeout, retry the next endpoint", zap.String("endpoint", endpoint)) } } var wg sync.WaitGroup wg.Add(2) go func() { defer wg.Done() _, err = io.Copy(remoteConn, localConn) if err != 
nil { zap.L().Error("Failed to copy from local to remote", zap.String("error", err.Error())) } }() go func() { defer wg.Done() _, err = io.Copy(localConn, remoteConn) if err != nil { zap.L().Error("Failed to copy from remote to local", zap.String("error", err.Error())) } }() wg.Wait() } tiup-1.16.3/pkg/queue/000077500000000000000000000000001505422223000144635ustar00rootroot00000000000000tiup-1.16.3/pkg/queue/any_queue.go000066400000000000000000000022041505422223000170030ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package queue import "slices" // AnyQueue is a queue stores any type AnyQueue struct { eq func(a any, b any) bool slice []any } // NewAnyQueue builds a AnyQueue func NewAnyQueue(eq func(a any, b any) bool, aa ...any) *AnyQueue { return &AnyQueue{eq, aa} } // Get returns previous stored value that equals to val and remove it from the queue, if not found, return nil func (q *AnyQueue) Get(val any) any { for i, a := range q.slice { if q.eq(a, val) { q.slice = slices.Delete(q.slice, i, i+1) return a } } return nil } // Put inserts `val` into `q`. func (q *AnyQueue) Put(val any) { q.slice = append(q.slice, val) } tiup-1.16.3/pkg/queue/any_queue_test.go000066400000000000000000000020751505422223000200500ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package queue import ( "reflect" "testing" "github.com/stretchr/testify/require" ) func TestAnyQueue(t *testing.T) { q := NewAnyQueue(reflect.DeepEqual) q.Put(true) q.Put(9527) require.Equal(t, true, q.slice[0]) require.Equal(t, 9527, q.slice[1]) q.Put(true) q.Put(9527) require.Equal(t, true, q.slice[2]) require.Equal(t, 9527, q.slice[3]) require.Equal(t, true, q.Get(true)) require.Equal(t, true, q.Get(true)) require.Nil(t, q.Get(true)) require.Equal(t, 9527, q.Get(9527)) require.Equal(t, 9527, q.Get(9527)) require.Nil(t, q.Get(9527)) } tiup-1.16.3/pkg/repository/000077500000000000000000000000001505422223000155565ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/clone_mirror.go000066400000000000000000000335031505422223000206030ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package repository import ( "fmt" "os" "path" "path/filepath" "strings" "time" "github.com/pingcap/tiup/pkg/cluster/template/install" "github.com/pingcap/errors" ru "github.com/pingcap/tiup/pkg/repository/utils" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/sync/errgroup" ) const defaultJobs = 1 // CloneOptions represents the options of clone a remote mirror type CloneOptions struct { Archs []string OSs []string Versions []string Full bool Components map[string]*[]string Prefix bool Jobs uint } // CloneMirror clones a local mirror from the remote repository func CloneMirror(repo Repository, components []string, targetDir string, selectedVersions []string, options CloneOptions) error { if strings.TrimRight(targetDir, "/") == strings.TrimRight(repo.Mirror().Source(), "/") { return errors.Errorf("Refusing to clone from %s to %s", targetDir, repo.Mirror().Source()) } fmt.Printf("Start to clone mirror, targetDir is %s, source mirror is %s, selectedVersions are [%s]\n", targetDir, repo.Mirror().Source(), strings.Join(selectedVersions, ",")) fmt.Println("If this does not meet expectations, please abort this process, read `tiup mirror clone --help` and run again") if err := utils.MkdirAll(targetDir, 0755); err != nil { return err } // Temporary directory is used to save the unverified tarballs tmpDir := filepath.Join(targetDir, fmt.Sprintf("_tmp_%d", time.Now().UnixNano())) keyDir := filepath.Join(targetDir, "keys") if err := utils.MkdirAll(tmpDir, 0755); err != nil { return err } if err := utils.MkdirAll(keyDir, 0755); err != nil { return err } defer os.RemoveAll(tmpDir) fmt.Println("Arch", options.Archs) fmt.Println("OS", options.OSs) if len(options.OSs) == 0 || len(options.Archs) == 0 { return nil } var ( initTime = time.Now() expiresAt = initTime.Add(50 * 365 * 24 * time.Hour) root = v1manifest.NewRoot(initTime) index = v1manifest.NewIndex(initTime) ) // All offline expires at 50 
years to prevent manifests stale root.SetExpiresAt(expiresAt) index.SetExpiresAt(expiresAt) keys := map[string][]*v1manifest.KeyInfo{} for _, ty := range []string{ v1manifest.ManifestTypeRoot, v1manifest.ManifestTypeIndex, v1manifest.ManifestTypeSnapshot, v1manifest.ManifestTypeTimestamp, } { if err := v1manifest.GenAndSaveKeys(keys, ty, int(v1manifest.ManifestsConfig[ty].Threshold), keyDir); err != nil { return err } } // initial manifests manifests := map[string]v1manifest.ValidManifest{ v1manifest.ManifestTypeRoot: root, v1manifest.ManifestTypeIndex: index, } signedManifests := make(map[string]*v1manifest.Manifest) genkey := func() (string, *v1manifest.KeyInfo, error) { priv, err := v1manifest.GenKeyInfo() if err != nil { return "", nil, err } id, err := priv.ID() if err != nil { return "", nil, err } return id, priv, nil } // Initialize the index manifest ownerkeyID, ownerkeyInfo, err := genkey() if err != nil { return errors.Trace(err) } // save owner key if _, err := v1manifest.SaveKeyInfo(ownerkeyInfo, "pingcap", keyDir); err != nil { return errors.Trace(err) } ownerkeyPub, err := ownerkeyInfo.Public() if err != nil { return errors.Trace(err) } index.Owners["pingcap"] = v1manifest.Owner{ Name: "PingCAP", Keys: map[string]*v1manifest.KeyInfo{ ownerkeyID: ownerkeyPub, }, Threshold: 1, } snapshot := v1manifest.NewSnapshot(initTime) snapshot.SetExpiresAt(expiresAt) componentManifests, err := cloneComponents(repo, components, selectedVersions, targetDir, tmpDir, options) if err != nil { return err } for name, component := range componentManifests { component.SetExpiresAt(expiresAt) fname := fmt.Sprintf("%s.json", name) // TODO: support external owner signedManifests[component.ID], err = v1manifest.SignManifest(component, ownerkeyInfo) if err != nil { return err } index.Components[component.ID] = v1manifest.ComponentItem{ Owner: "pingcap", URL: fmt.Sprintf("/%s", fname), } } // sign index and snapshot signedManifests[v1manifest.ManifestTypeIndex], err = 
v1manifest.SignManifest(index, keys[v1manifest.ManifestTypeIndex]...) if err != nil { return err } // snapshot and timestamp are the last two manifests to be initialized // Initialize timestamp timestamp := v1manifest.NewTimestamp(initTime) timestamp.SetExpiresAt(expiresAt) manifests[v1manifest.ManifestTypeTimestamp] = timestamp manifests[v1manifest.ManifestTypeSnapshot] = snapshot // Initialize the root manifest for _, m := range manifests { if err := root.SetRole(m, keys[m.Base().Ty]...); err != nil { return err } } // Sign root signedManifests[v1manifest.ManifestTypeRoot], err = v1manifest.SignManifest(root, keys[v1manifest.ManifestTypeRoot]...) if err != nil { return err } // init snapshot snapshot, err = snapshot.SetVersions(signedManifests) if err != nil { return err } signedManifests[v1manifest.ManifestTypeSnapshot], err = v1manifest.SignManifest(snapshot, keys[v1manifest.ManifestTypeSnapshot]...) if err != nil { return err } timestamp, err = timestamp.SetSnapshot(signedManifests[v1manifest.ManifestTypeSnapshot]) if err != nil { return errors.Trace(err) } signedManifests[v1manifest.ManifestTypeTimestamp], err = v1manifest.SignManifest(timestamp, keys[v1manifest.ManifestTypeTimestamp]...) if err != nil { return err } for _, m := range signedManifests { fname := filepath.Join(targetDir, m.Signed.Filename()) switch m.Signed.Base().Ty { case v1manifest.ManifestTypeRoot: err := v1manifest.WriteManifestFile(FnameWithVersion(fname, m.Signed.Base().Version), m) if err != nil { return err } // A copy of the newest version which is 1. 
err = v1manifest.WriteManifestFile(fname, m) if err != nil { return err } case v1manifest.ManifestTypeComponent, v1manifest.ManifestTypeIndex: err := v1manifest.WriteManifestFile(FnameWithVersion(fname, m.Signed.Base().Version), m) if err != nil { return err } default: err = v1manifest.WriteManifestFile(fname, m) if err != nil { return err } } } return install.WriteLocalInstallScript(filepath.Join(targetDir, "local_install.sh")) } func cloneComponents(repo Repository, components, selectedVersions []string, targetDir, tmpDir string, options CloneOptions) (map[string]*v1manifest.Component, error) { compManifests := map[string]*v1manifest.Component{} jobs := options.Jobs if jobs <= 0 { jobs = defaultJobs } errG := &errgroup.Group{} tickets := make(chan struct{}, jobs) defer func() { close(tickets) }() for _, name := range components { manifest, err := repo.GetComponentManifest(name, true) if err != nil { return nil, errors.Annotatef(err, "fetch component '%s' manifest failed", name) } vs, err := combineVersions(options.Components[name], manifest, options.OSs, options.Archs, selectedVersions) if err != nil { return nil, err } var newManifest *v1manifest.Component if options.Full { newManifest = manifest } else { if len(vs) < 1 { continue } newManifest = &v1manifest.Component{ SignedBase: manifest.SignedBase, ID: manifest.ID, Description: manifest.Description, Platforms: map[string]map[string]v1manifest.VersionItem{}, } // Include the nightly reference version if vs.Exist(utils.NightlyVersionAlias) { newManifest.Nightly = manifest.Nightly vs.Insert(manifest.Nightly) } } platforms := []string{} for _, goos := range options.OSs { for _, goarch := range options.Archs { platforms = append(platforms, PlatformString(goos, goarch)) } } if len(platforms) > 0 { platforms = append(platforms, v1manifest.AnyPlatform) } for _, platform := range platforms { for v, versionItem := range manifest.Platforms[platform] { if !options.Full { newVersions := newManifest.Platforms[platform] if 
newVersions == nil { newVersions = map[string]v1manifest.VersionItem{} newManifest.Platforms[platform] = newVersions } newVersions[v] = versionItem if !checkVersion(options, vs, v) { versionItem.Yanked = true newVersions[v] = versionItem continue } } if _, err := repo.GetComponentManifest(name, false); err != nil || versionItem.Yanked { // The component or the version is yanked, skip download binary continue } name, versionItem := name, versionItem tickets <- struct{}{} errG.Go(func() error { defer func() { <-tickets }() err := download(targetDir, tmpDir, repo, &versionItem) if err != nil { return errors.Annotatef(err, "download resource: %s", name) } return nil }) } } compManifests[name] = newManifest } if err := errG.Wait(); err != nil { return nil, err } // Download TiUP binary for _, goos := range options.OSs { for _, goarch := range options.Archs { url := fmt.Sprintf("/tiup-%s-%s.tar.gz", goos, goarch) dstFile := filepath.Join(targetDir, url) tmpFile := filepath.Join(tmpDir, url) if err := repo.Mirror().Download(url, tmpDir); err != nil { if errors.Cause(err) == ErrNotFound { fmt.Printf("TiUP doesn't have %s/%s, skipped\n", goos, goarch) continue } return nil, err } // Move file to target directory if hashes pass verify. 
if err := os.Rename(tmpFile, dstFile); err != nil { return nil, err } } } return compManifests, nil } func download(targetDir, tmpDir string, repo Repository, item *v1manifest.VersionItem) error { validate := func(dir string) error { hashes, n, err := ru.HashFile(path.Join(dir, item.URL)) if err != nil { return err } if uint(n) != item.Length { return errors.Errorf("file length mismatch, expected: %d, got: %v", item.Length, n) } for algo, hash := range item.Hashes { h, found := hashes[algo] if !found { continue } if h != hash { return errors.Errorf("file %s hash mismatch, expected: %s, got: %s", algo, hash, h) } } return nil } dstFile := filepath.Join(targetDir, item.URL) tmpFile := filepath.Join(tmpDir, item.URL) // Skip installed file if exists file valid if utils.IsExist(dstFile) { if err := validate(targetDir); err == nil { fmt.Println("Skipping existing file:", filepath.Join(targetDir, item.URL)) return nil } } err := repo.Mirror().Download(item.URL, tmpDir) if err != nil { return err } if err := validate(tmpDir); err != nil { return err } // Move file to target directory if hashes pass verify. 
return os.Rename(tmpFile, dstFile) } func checkVersion(options CloneOptions, versions set.StringSet, version string) bool { if options.Full || versions.Exist("all") || versions.Exist(version) { return true } // prefix match for v := range versions { if options.Prefix && strings.HasPrefix(version, v) { return true } else if version == v { return true } } return false } func combineVersions(componentVersions *[]string, manifest *v1manifest.Component, oss, archs, globalVersions []string) (set.StringSet, error) { result := set.NewStringSet() for _, os := range oss { for _, arch := range archs { platform := PlatformString(os, arch) versionList := manifest.VersionList(platform) // set specified version with latest tag if result.Exist(utils.LatestVersionAlias) { latest := manifest.LatestVersion(platform) if latest != "" { result.Insert(latest) } } if componentVersions != nil && len(*componentVersions) > 0 { for _, selectedVersion := range *componentVersions { fmt.Printf("%s %s/%s selected version is %s\n", manifest.ID, os, arch, selectedVersion) if selectedVersion == utils.NightlyVersionAlias { selectedVersion = manifest.Nightly } if selectedVersion == utils.LatestVersionAlias { latest := manifest.LatestVersion(platform) if latest != "" { fmt.Printf("%s %s/%s found the latest version %s\n", manifest.ID, os, arch, latest) // set latest version selectedVersion = latest } } _, found := versionList[selectedVersion] if !found { return nil, errors.Errorf("version %s not found in %s %s/%s", selectedVersion, manifest.ID, os, arch) } result.Insert(selectedVersion) } } else { for _, selectedVersion := range globalVersions { if selectedVersion == utils.NightlyVersionAlias { selectedVersion = manifest.Nightly } if selectedVersion == utils.LatestVersionAlias { latest := manifest.LatestVersion(platform) if latest == "" { continue } fmt.Printf("%s %s/%s found the latest version %s\n", manifest.ID, os, arch, latest) // set latest version selectedVersion = latest } _, found := 
versionList[selectedVersion] // Some TiUP components won't be bound version with TiDB, if cannot find // selected version we download the latest version to as a alternative if !found { // Use the latest stable versionS if the selected version doesn't exist in specific platform latest := manifest.LatestVersion(platform) if latest == "" { continue } if selectedVersion != utils.LatestVersionAlias { fmt.Printf("%s %s/%s %s not found, using %s instead.\n", manifest.ID, os, arch, selectedVersion, latest) } selectedVersion = latest } result.Insert(selectedVersion) } } } } return result, nil } tiup-1.16.3/pkg/repository/constant.go000066400000000000000000000017771505422223000177520ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package repository const ( // ManifestFileName is the filename of the manifest. ManifestFileName = "tiup-manifest.index" // DefaultMirror is the location of the mirror to use if none is specified by the user via `EnvMirrors`. DefaultMirror = "https://tiup-mirrors.pingcap.com/" // EnvMirrors is the name of an env var the user can set to specify a mirror. EnvMirrors = "TIUP_MIRRORS" // TiUPBinaryName is the name of the tiup binary, both in the repository and locally. TiUPBinaryName = "tiup" ) tiup-1.16.3/pkg/repository/merge_mirror.go000066400000000000000000000167721505422223000206130ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package repository import ( "fmt" "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/repository/model" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" ) type diffItem struct { name string componentItem v1manifest.ComponentItem versionItem v1manifest.VersionItem version string os string arch string desc string } // returns component that exists in addition but not in base func diffMirror(base, addition Mirror) ([]diffItem, error) { baseIndex, err := fetchIndexManifestFromMirror(base) if err != nil { return nil, err } additionIndex, err := fetchIndexManifestFromMirror(addition) if err != nil { return nil, err } items := []diffItem{} baseComponents := baseIndex.ComponentListWithYanked() additionComponents := additionIndex.ComponentList() for name, comp := range additionComponents { if baseComponents[name].Yanked { continue } baseComponent, err := fetchComponentManifestFromMirror(base, name) if err != nil { return nil, err } additionComponent, err := fetchComponentManifestFromMirror(addition, name) if err != nil { return nil, err } items = append(items, component2Diff(name, baseComponents[name], baseComponent, comp, additionComponent)...) 
} return items, nil } func component2Diff(name string, baseItem v1manifest.ComponentItem, baseManifest *v1manifest.Component, additionItem v1manifest.ComponentItem, additionManifest *v1manifest.Component) []diffItem { items := []diffItem{} for plat := range additionManifest.Platforms { versions := additionManifest.VersionList(plat) for ver, verinfo := range versions { // Don't merge nightly this time if utils.Version(ver).IsNightly() { continue } // this version not exits in base if baseManifest.VersionList(plat)[ver].URL == "" { osArch := strings.Split(plat, "/") if len(osArch) != 2 { continue } item := diffItem{ name: name, componentItem: baseItem, versionItem: verinfo, version: ver, os: osArch[0], arch: osArch[1], desc: additionManifest.Description, } if baseItem.URL == "" { item.componentItem = additionItem } items = append(items, item) } } } return items } // MergeMirror merges two or more mirrors func MergeMirror(keys map[string]*v1manifest.KeyInfo, base Mirror, additions ...Mirror) error { ownerKeys, err := mapOwnerKeys(base, keys) if err != nil { return err } for _, addition := range additions { diffs, err := diffMirror(base, addition) if err != nil { return err } for _, diff := range diffs { if len(ownerKeys[diff.componentItem.Owner]) == 0 { return errors.Errorf("missing owner keys for owner %s on component %s", diff.componentItem.Owner, diff.name) } comp, err := fetchComponentManifestFromMirror(base, diff.name) if err != nil { return err } comp = UpdateManifestForPublish(comp, diff.name, diff.version, diff.versionItem.Entry, diff.os, diff.arch, diff.desc, diff.versionItem.FileHash) manifest, err := v1manifest.SignManifest(comp, ownerKeys[diff.componentItem.Owner]...) 
if err != nil { return err } resource := strings.TrimPrefix(diff.versionItem.URL, "/") tarfile, err := addition.Fetch(resource, 0) if err != nil { return err } defer tarfile.Close() publishInfo := &model.PublishInfo{ ComponentData: &model.TarInfo{Reader: tarfile, Name: resource}, Stand: &diff.componentItem.Standalone, Hide: &diff.componentItem.Hidden, } if err := base.Publish(manifest, publishInfo); err != nil { return err } } } return nil } func fetchComponentManifestFromMirror(mirror Mirror, component string) (*v1manifest.Component, error) { r, err := mirror.Fetch(v1manifest.ManifestFilenameSnapshot, 0) if err != nil { return nil, err } defer r.Close() snap := v1manifest.Snapshot{} if _, err := v1manifest.ReadNoVerify(r, &snap); err != nil { return nil, err } v := snap.Meta[fmt.Sprintf("/%s.json", component)].Version if v == 0 { // nil means that the component manifest not found return nil, nil } r, err = mirror.Fetch(fmt.Sprintf("%d.%s.json", v, component), 0) if err != nil { return nil, err } defer r.Close() role := v1manifest.Component{} // TODO: this time we just assume the addition mirror is trusted if _, err := v1manifest.ReadNoVerify(r, &role); err != nil { return nil, err } return &role, nil } func fetchIndexManifestFromMirror(mirror Mirror) (*v1manifest.Index, error) { r, err := mirror.Fetch(v1manifest.ManifestFilenameSnapshot, 0) if err != nil { return nil, err } defer r.Close() snap := v1manifest.Snapshot{} if _, err := v1manifest.ReadNoVerify(r, &snap); err != nil { return nil, err } indexVersion := snap.Meta[v1manifest.ManifestURLIndex].Version if indexVersion == 0 { return nil, errors.Errorf("missing index manifest in base mirror") } r, err = mirror.Fetch(fmt.Sprintf("%d.%s", indexVersion, v1manifest.ManifestFilenameIndex), 0) if err != nil { return nil, err } defer r.Close() index := v1manifest.Index{} if _, err := v1manifest.ReadNoVerify(r, &index); err != nil { return nil, err } return &index, nil } // the keys in param is keyID -> KeyInfo, we 
should map it to ownerID -> KeyInfoList func mapOwnerKeys(base Mirror, keys map[string]*v1manifest.KeyInfo) (map[string][]*v1manifest.KeyInfo, error) { index, err := fetchIndexManifestFromMirror(base) if err != nil { return nil, err } keyList := map[string][]*v1manifest.KeyInfo{} for ownerID, owner := range index.Owners { for keyID := range owner.Keys { if key := keys[keyID]; key != nil { keyList[ownerID] = append(keyList[ownerID], key) } } if len(keyList[ownerID]) < owner.Threshold { // We set keys of this owner to empty because we can't clone components belong to this owner keyList[ownerID] = nil } } return keyList, nil } // UpdateManifestForPublish set corresponding field for component manifest func UpdateManifestForPublish(m *v1manifest.Component, name, ver, entry, os, arch, desc string, filehash v1manifest.FileHash) *v1manifest.Component { initTime := time.Now() // update manifest if m == nil { m = v1manifest.NewComponent(name, desc, initTime) } else { v1manifest.RenewManifest(m, initTime) if desc != "" { m.Description = desc } } if utils.Version(ver).IsNightly() { m.Nightly = ver } // Remove history nightly for plat := range m.Platforms { for v := range m.Platforms[plat] { if strings.Contains(v, utils.NightlyVersionAlias) && v != m.Nightly { delete(m.Platforms[plat], v) } } } platformStr := fmt.Sprintf("%s/%s", os, arch) if m.Platforms[platformStr] == nil { m.Platforms[platformStr] = map[string]v1manifest.VersionItem{} } m.Platforms[platformStr][ver] = v1manifest.VersionItem{ Entry: entry, Released: initTime.Format(time.RFC3339), URL: fmt.Sprintf("/%s-%s-%s-%s.tar.gz", name, ver, os, arch), FileHash: filehash, } return m } tiup-1.16.3/pkg/repository/merge_mirror_test.go000066400000000000000000000134501505422223000216400ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package repository import ( "encoding/json" "io" "testing" "github.com/pingcap/tiup/pkg/repository/model" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils/mock" "github.com/stretchr/testify/assert" ) func manifest2str(m *v1manifest.Manifest) string { b, err := json.Marshal(m) if err != nil { panic(err) } return string(b) } func baseMirror4test(ownerKeys map[string]*v1manifest.KeyInfo) Mirror { return &MockMirror{ Resources: map[string]string{ "snapshot.json": manifest2str(&v1manifest.Manifest{ Signed: &v1manifest.Snapshot{ Meta: map[string]v1manifest.FileVersion{ "/index.json": { Version: 1, }, "/test.json": { Version: 1, }, }, }, }), "1.index.json": manifest2str(&v1manifest.Manifest{ Signed: &v1manifest.Index{ Owners: map[string]v1manifest.Owner{ "pingcap": { Name: "PingCAP", Keys: ownerKeys, Threshold: 1, }, }, Components: map[string]v1manifest.ComponentItem{ "test": { Owner: "pingcap", URL: "/test.json", }, }, }, }), "1.test.json": manifest2str(&v1manifest.Manifest{ Signed: &v1manifest.Component{ Platforms: map[string]map[string]v1manifest.VersionItem{ "linux/amd64": { "v1.0.0": { URL: "/test-v1.0.0-linux-amd64.tar.gz", Entry: "test", }, }, }, }, }), }, } } func sourceMirror4test() Mirror { return &MockMirror{ Resources: map[string]string{ "snapshot.json": manifest2str(&v1manifest.Manifest{ Signed: &v1manifest.Snapshot{ Meta: map[string]v1manifest.FileVersion{ "/index.json": { Version: 2, }, "/hello.json": { Version: 1, }, "/test.json": { Version: 1, }, }, }, }), "2.index.json": manifest2str(&v1manifest.Manifest{ Signed: &v1manifest.Index{ Components: map[string]v1manifest.ComponentItem{ 
"test": { Owner: "pingcap", URL: "/test.json", }, "hello": { Owner: "pingcap", URL: "/hello.json", }, }, }, }), "1.test.json": manifest2str(&v1manifest.Manifest{ Signed: &v1manifest.Component{ Platforms: map[string]map[string]v1manifest.VersionItem{ "linux/amd64": { "v1.0.0": { URL: "/test-v1.0.0-linux-amd64.tar.gz", Entry: "test", }, "v1.0.1": { URL: "/test-v1.0.1-linux-amd64.tar.gz", Entry: "test", }, }, "linux/arm64": { "v1.0.0": { URL: "/test-v1.0.0-linux-arm64.tar.gz", Entry: "test", }, }, }, }, }), "1.hello.json": manifest2str(&v1manifest.Manifest{ Signed: &v1manifest.Component{ Platforms: map[string]map[string]v1manifest.VersionItem{ "linux/amd64": { "v1.0.0": { URL: "/hello-v1.0.0-linux-amd64.tar.gz", Entry: "hello", }, }, }, }, }), "hello-v1.0.0-linux-amd64.tar.gz": "hello-v1.0.0-linux-amd64.tar.gz", "test-v1.0.1-linux-amd64.tar.gz": "test-v1.0.1-linux-amd64.tar.gz", "test-v1.0.0-linux-arm64.tar.gz": "test-v1.0.0-linux-arm64.tar.gz", }, } } func TestDiffMirror(t *testing.T) { base := baseMirror4test(nil) source := sourceMirror4test() items, err := diffMirror(base, source) assert.Nil(t, err) assert.Equal(t, 3, len(items)) for _, it := range items { assert.Contains(t, []string{ "/hello-v1.0.0-linux-amd64.tar.gz", "/test-v1.0.0-linux-arm64.tar.gz", "/test-v1.0.1-linux-amd64.tar.gz", }, it.versionItem.URL) } } func TestMergeMirror(t *testing.T) { ki, err := v1manifest.GenKeyInfo() if err != nil { panic(err) } id, err := ki.ID() if err != nil { panic(err) } keys := map[string]*v1manifest.KeyInfo{ id: ki, } base := baseMirror4test(keys) source := sourceMirror4test() // manifestList := []*v1manifest.Manifest{} // componentInfoList := []model.ComponentInfo{} err = MergeMirror(keys, base, source) assert.Nil(t, err) mock.With("Publish", func(manifest *v1manifest.Manifest, info model.ComponentInfo) { assert.Contains(t, []string{ "hello-v1.0.0-linux-amd64.tar.gz", "test-v1.0.0-linux-arm64.tar.gz", "test-v1.0.1-linux-amd64.tar.gz", }, info.Filename()) b, err := 
io.ReadAll(info) assert.Nil(t, err) assert.Contains(t, []string{ "hello-v1.0.0-linux-amd64.tar.gz", "test-v1.0.0-linux-arm64.tar.gz", "test-v1.0.1-linux-amd64.tar.gz", }, string(b)) })() } func TestFetchIndex(t *testing.T) { source := sourceMirror4test() index, err := fetchIndexManifestFromMirror(source) assert.Nil(t, err) assert.NotEmpty(t, index.Components["hello"].URL) assert.NotEmpty(t, index.Components["test"].URL) base := baseMirror4test(nil) index, err = fetchIndexManifestFromMirror(base) assert.Nil(t, err) assert.NotEmpty(t, index.Owners["pingcap"].Name) } func TestFetchComponent(t *testing.T) { source := sourceMirror4test() comp, err := fetchComponentManifestFromMirror(source, "test") assert.Nil(t, err) assert.NotEmpty(t, comp.Platforms["linux/amd64"]) assert.NotEmpty(t, comp.Platforms["linux/arm64"]) assert.NotEmpty(t, comp.Platforms["linux/amd64"]["v1.0.0"].URL) assert.NotEmpty(t, comp.Platforms["linux/amd64"]["v1.0.1"].URL) assert.NotEmpty(t, comp.Platforms["linux/arm64"]["v1.0.0"].URL) } tiup-1.16.3/pkg/repository/mirror.go000066400000000000000000000377711505422223000174360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package repository import ( "bytes" "crypto/tls" "encoding/json" stderrors "errors" "fmt" "io" "net/http" "net/url" "os" "path" "path/filepath" "strconv" "strings" "time" "github.com/cavaliergopher/grab/v3" "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/crypto/rand" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/repository/model" "github.com/pingcap/tiup/pkg/repository/store" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/utils/mock" "github.com/pingcap/tiup/pkg/version" ) const ( // OptionYanked is the key that represents a component is yanked or not OptionYanked = "yanked" // OptionStandalone is the key that represents a component is standalone or not OptionStandalone = "standalone" // OptionHidden is the key that represents a component is hidden or not OptionHidden = "hidden" ) // predefined errors var ( ErrNotFound = stderrors.New("not found") // resource does not exists ErrManifestTooOld = stderrors.New("component manifest is too old, update it before publish") ) type ( // DownloadProgress represents the download progress notifier DownloadProgress interface { Start(url string, size int64) SetCurrent(size int64) Finish() } // MirrorOptions is used to customize the mirror download options MirrorOptions struct { Progress DownloadProgress Upstream string KeyDir string } // Mirror represents a repository mirror, which can be remote HTTP // server or a local file system directory Mirror interface { model.Backend // Source returns the address of the mirror Source() string // Open initialize the mirror. Open() error // Download fetches a resource to disk. // The implementation must return ErrNotFound if the resource not exists. Download(resource, targetDir string) error // Fetch fetches a resource into memory. The caller must close the returned reader. 
Id the size of the resource // is greater than maxSize, Fetch returns an error. Use maxSize == 0 for no limit. // The implementation must return ErrNotFound if the resource not exists. Fetch(resource string, maxSize int64) (io.ReadCloser, error) // Close closes the mirror and release local stashed files. Close() error } ) // NewMirror returns a mirror instance Base on the schema of mirror func NewMirror(mirror string, options MirrorOptions) Mirror { if options.Progress == nil { options.Progress = &ProgressBar{} } if strings.HasPrefix(mirror, "http") { return &httpMirror{ server: mirror, options: options, } } return &localFilesystem{rootPath: mirror, keyDir: options.KeyDir, upstream: options.Upstream} } type localFilesystem struct { rootPath string keyDir string upstream string keys map[string]*v1manifest.KeyInfo } // Source implements the Mirror interface func (l *localFilesystem) Source() string { return l.rootPath } // Open implements the Mirror interface func (l *localFilesystem) Open() error { fi, err := os.Stat(l.rootPath) if err != nil { return errors.Trace(err) } if !fi.IsDir() { return errors.Errorf("local system mirror `%s` should be a directory", l.rootPath) } if l.keyDir == "" { l.keyDir = path.Join(l.rootPath, "keys") } if utils.IsNotExist(l.keyDir) { return nil } return l.loadKeys() } // load mirror keys func (l *localFilesystem) loadKeys() error { l.keys = make(map[string]*v1manifest.KeyInfo) return filepath.Walk(l.keyDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() { return nil } f, err := os.Open(path) if err != nil { return errors.Annotate(err, "open file while loadKeys") } defer f.Close() ki := v1manifest.KeyInfo{} if err := json.NewDecoder(f).Decode(&ki); err != nil { return errors.Annotate(err, "decode key") } id, err := ki.ID() if err != nil { return err } l.keys[id] = &ki return nil }) } // Publish implements the model.Backend interface func (l *localFilesystem) Publish(manifest 
*v1manifest.Manifest, info model.ComponentInfo) error { txn, err := store.New(l.rootPath, l.upstream).Begin() if err != nil { return err } if err := model.New(txn, l.keys).Publish(manifest, info); err != nil { _ = txn.Rollback() return err } return nil } // Grant implements the model.Backend interface func (l *localFilesystem) Grant(id, name string, key *v1manifest.KeyInfo) error { txn, err := store.New(l.rootPath, l.upstream).Begin() if err != nil { return err } if err := model.New(txn, l.keys).Grant(id, name, key); err != nil { _ = txn.Rollback() return err } return nil } // Rotate implements the model.Backend interface func (l *localFilesystem) Rotate(m *v1manifest.Manifest) error { txn, err := store.New(l.rootPath, l.upstream).Begin() if err != nil { return err } if err := model.New(txn, l.keys).Rotate(m); err != nil { _ = txn.Rollback() return err } return nil } // Download implements the Mirror interface func (l *localFilesystem) Download(resource, targetDir string) error { reader, err := l.Fetch(resource, 0) if err != nil { return errors.Trace(err) } defer reader.Close() if err := utils.MkdirAll(targetDir, 0755); err != nil { return errors.Trace(err) } outPath := filepath.Join(targetDir, resource) writer, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) if err != nil { if os.IsNotExist(err) { return errors.Annotatef(ErrNotFound, "resource %s", resource) } return errors.Trace(err) } defer writer.Close() _, err = io.Copy(writer, reader) return err } // Fetch implements the Mirror interface func (l *localFilesystem) Fetch(resource string, maxSize int64) (io.ReadCloser, error) { path := filepath.Join(l.rootPath, resource) file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm) if err != nil { if os.IsNotExist(err) { return nil, errors.Annotatef(ErrNotFound, "resource %s", resource) } return nil, errors.Trace(err) } if maxSize > 0 { info, err := file.Stat() if err != nil { return nil, errors.Trace(err) } if info.Size() > maxSize { 
return nil, errors.Errorf("local load from %s failed, maximum size exceeded, file size: %d, max size: %d", resource, info.Size(), maxSize) } } return file, nil } // Close implements the Mirror interface func (l *localFilesystem) Close() error { return nil } type httpMirror struct { server string tmpDir string options MirrorOptions } // Source implements the Mirror interface func (l *httpMirror) Source() string { return l.server } // Open implements the Mirror interface func (l *httpMirror) Open() error { tmpDir := filepath.Join(os.TempDir(), strconv.Itoa(rand.Int())) if err := os.MkdirAll(tmpDir, os.ModePerm); err != nil { return errors.Trace(err) } l.tmpDir = tmpDir return nil } func (l *httpMirror) downloadFile(url string, to string, maxSize int64) (io.ReadCloser, error) { defer func(start time.Time) { logprinter.Verbose("Download resource %s in %s", url, time.Since(start)) }(time.Now()) client := grab.NewClient() // workaround to resolve cdn error "tls: protocol version not supported" client.HTTPClient.(*http.Client).Transport = &http.Transport{ Proxy: http.ProxyFromEnvironment, // avoid using http/2 by setting non-nil TLSClientConfig TLSClientConfig: &tls.Config{}, } client.UserAgent = fmt.Sprintf("tiup/%s", version.NewTiUPVersion().SemVer()) req, err := grab.NewRequest(to, url) if err != nil { return nil, errors.Trace(err) } if len(to) == 0 { req.NoStore = true } resp := client.Do(req) // start progress output loop t := time.NewTicker(time.Millisecond) defer t.Stop() var progress DownloadProgress if strings.Contains(url, ".tar.gz") { progress = l.options.Progress } else { progress = DisableProgress{} } progress.Start(url, resp.Size()) L: for { select { case <-t.C: if maxSize > 0 && resp.BytesComplete() > maxSize { _ = resp.Cancel() return nil, errors.Errorf("download from %s failed, resp size %d exceeds maximum size %d", url, resp.BytesComplete(), maxSize) } progress.SetCurrent(resp.BytesComplete()) case <-resp.Done: progress.SetCurrent(resp.BytesComplete()) 
progress.Finish() break L } } // check for errors if err := resp.Err(); err != nil { if grab.IsStatusCodeError(err) { code := err.(grab.StatusCodeError) if int(code) == http.StatusNotFound { return nil, errors.Annotatef(ErrNotFound, "url %s", url) } } return nil, errors.Annotatef(err, "download from %s failed", url) } if maxSize > 0 && resp.BytesComplete() > maxSize { return nil, errors.Errorf("download from %s failed, resp size %d exceeds maximum size %d", url, resp.BytesComplete(), maxSize) } return resp.Open() } func (l *httpMirror) prepareURL(resource string) string { url := strings.TrimSuffix(l.server, "/") + "/" + strings.TrimPrefix(resource, "/") // Force CDN to refresh if the resource name starts with TiUPBinaryName. if strings.HasPrefix(resource, TiUPBinaryName) { nano := time.Now().UnixNano() url = fmt.Sprintf("%s?v=%d", url, nano) } return url } // Grant implements the model.Backend interface func (l *httpMirror) Grant(id, name string, key *v1manifest.KeyInfo) error { return errors.Errorf("cannot add a user for a remote mirror, please set your mirror to a local directory") } // Rotate implements the model.Backend interface func (l *httpMirror) Rotate(m *v1manifest.Manifest) error { rotateAddr := fmt.Sprintf("%s/api/v1/rotate", l.Source()) data, err := json.Marshal(m) if err != nil { return errors.Annotate(err, "marshal root manifest") } client := http.Client{Timeout: time.Minute} resp, err := client.Post(rotateAddr, "text/json", bytes.NewBuffer(data)) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 300 { return nil } switch resp.StatusCode { case http.StatusConflict: return errors.Errorf("The manifest has been modified after you fetched it, please try again") case http.StatusBadRequest: return errors.Errorf("The server rejected the manifest, please check if it's a valid root manifest") default: buf := new(strings.Builder) if _, err := io.Copy(buf, resp.Body); err != nil { return err } return fmt.Errorf("unknow error from server, 
response code: %d response body: %s", resp.StatusCode, buf.String()) } } // Publish implements the model.Backend interface func (l *httpMirror) Publish(manifest *v1manifest.Manifest, info model.ComponentInfo) error { sid := uuid.New().String() if info.Filename() != "" { tarAddr := fmt.Sprintf("%s/api/v1/tarball/%s", l.Source(), sid) resp, err := utils.PostFile(info, tarAddr, "file", info.Filename()) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode >= 300 { return errors.Errorf("error on uplaod tarball, server returns %d", resp.StatusCode) } } payload, err := json.Marshal(manifest) if err != nil { return err } bodyBuf := bytes.NewBuffer(payload) q := url.Values{} if info.Yanked() != nil { q.Set(OptionYanked, fmt.Sprintf("%t", *info.Yanked())) } if info.Standalone() != nil { q.Set(OptionStandalone, fmt.Sprintf("%t", *info.Standalone())) } if info.Hidden() != nil { q.Set(OptionHidden, fmt.Sprintf("%t", *info.Hidden())) } qstr := "" if len(q) > 0 { qstr = "?" + q.Encode() } manifestAddr := fmt.Sprintf("%s/api/v1/component/%s/%s%s", l.Source(), sid, manifest.Signed.(*v1manifest.Component).ID, qstr) client := http.Client{Timeout: 5 * time.Minute} resp, err := client.Post(manifestAddr, "text/json", bodyBuf) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode < 300 { return nil } switch resp.StatusCode { case http.StatusConflict: return ErrManifestTooOld case http.StatusForbidden: return errors.Errorf("The server refused, make sure you have access to this component") default: buf := new(strings.Builder) if _, err := io.Copy(buf, resp.Body); err != nil { return err } return fmt.Errorf("unknow error from server, response code: %d response body: %s", resp.StatusCode, buf.String()) } } func (l *httpMirror) isRetryable(err error) bool { retryableList := []string{ "unexpected EOF", "stream error", "server returned 502 Bad Gateway", } for _, text := range retryableList { if strings.Contains(strings.ToLower(err.Error()), 
strings.ToLower(text)) { return true } } return false } // Download implements the Mirror interface func (l *httpMirror) Download(resource, targetDir string) error { tmpFilePath := filepath.Join(l.tmpDir, resource) dstFilePath := filepath.Join(targetDir, resource) // downloaded file is stored in a temp directory and the temp directory is // deleted at Close(), in this way an interrupted download won't remain // any partial file on the disk var err error _ = utils.Retry(func() error { var r io.ReadCloser if err != nil && l.isRetryable(err) { logprinter.Warnf("failed to download %s(%s), retrying...", resource, err.Error()) } if r, err = l.downloadFile(l.prepareURL(resource), tmpFilePath, 0); err != nil { if l.isRetryable(err) { return err } // Abort retry return nil } return r.Close() }, utils.RetryOption{ Timeout: time.Hour, Attempts: 3, }) if err != nil { return err } if err := utils.MkdirAll(targetDir, 0755); err != nil { return errors.Trace(err) } return utils.Move(tmpFilePath, dstFilePath) } // Fetch implements the Mirror interface func (l *httpMirror) Fetch(resource string, maxSize int64) (io.ReadCloser, error) { return l.downloadFile(l.prepareURL(resource), "", maxSize) } // Close implements the Mirror interface func (l *httpMirror) Close() error { if err := os.RemoveAll(l.tmpDir); err != nil { return errors.Trace(err) } return nil } // MockMirror is a mirror for testing type MockMirror struct { // Resources is a map from resource name to resource content. Resources map[string]string } // Source implements the Mirror interface func (l *MockMirror) Source() string { return "mock" } // Open implements Mirror. func (l *MockMirror) Open() error { return nil } // Download implements Mirror. 
func (l *MockMirror) Download(resource, targetDir string) error { content, ok := l.Resources[resource] if !ok { return errors.Annotatef(ErrNotFound, "resource %s", resource) } if err := utils.MkdirAll(targetDir, 0755); err != nil { return err } target := filepath.Join(targetDir, resource) file, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) if err != nil { return err } defer file.Close() _, err = file.Write([]byte(content)) return err } // Grant implements the model.Backend interface func (l *MockMirror) Grant(id, name string, key *v1manifest.KeyInfo) error { return nil } // Rotate implements the model.Backend interface func (l *MockMirror) Rotate(m *v1manifest.Manifest) error { return nil } // Publish implements the Mirror interface func (l *MockMirror) Publish(manifest *v1manifest.Manifest, info model.ComponentInfo) error { // Mock point for unit test if fn := mock.On("Publish"); fn != nil { fn.(func(*v1manifest.Manifest, model.ComponentInfo))(manifest, info) } return nil } // Fetch implements Mirror. func (l *MockMirror) Fetch(resource string, maxSize int64) (io.ReadCloser, error) { content, ok := l.Resources[resource] if !ok { return nil, errors.Annotatef(ErrNotFound, "resource %s", resource) } if maxSize > 0 && int64(len(content)) > maxSize { return nil, fmt.Errorf("oversized resource %s in mock mirror %v > %v", resource, len(content), maxSize) } return io.NopCloser(strings.NewReader(content)), nil } // Close implements Mirror. func (l *MockMirror) Close() error { return nil } tiup-1.16.3/pkg/repository/model/000077500000000000000000000000001505422223000166565ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/model/error.go000066400000000000000000000030611505422223000203360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package model import ( "errors" ) var ( // ErrorConflict indicates manifest conflict ErrorConflict = errors.New("manifest conflict") // ErrorMissingKey indicates that the private key is missing ErrorMissingKey = errors.New("the private key is missing") // ErrorMissingOwner indicates that the owner is not found ErrorMissingOwner = errors.New("owner not found") // ErrorWrongSignature indicates that the signature is not correct ErrorWrongSignature = errors.New("invalid signature") // ErrorWrongChecksum indicates that the checksum of tar file is not correct ErrorWrongChecksum = errors.New("checksum mismatch") // ErrorWrongFileName indicates that the name of tar file is not correct ErrorWrongFileName = errors.New("incorrect file name") // ErrorWrongManifestType indicates that the manifest type is not expected ErrorWrongManifestType = errors.New("the manifest type is not expected") // ErrorWrongManifestVersion indicates that the manifest version is not expected ErrorWrongManifestVersion = errors.New("the manifest version is not expected") ) tiup-1.16.3/pkg/repository/model/model.go000066400000000000000000000367061505422223000203210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package model import ( "fmt" "strings" "time" cjson "github.com/gibson042/canonicaljson-go" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/repository/store" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" "github.com/pingcap/tiup/pkg/version" "go.uber.org/zap" ) // Backend defines operations on the manifests type Backend interface { // Publish push a new component to mirror or modify an exists component Publish(manifest *v1manifest.Manifest, info ComponentInfo) error // Introduce add a new owner to mirror Grant(id, name string, key *v1manifest.KeyInfo) error // Rotate update root manifest Rotate(manifest *v1manifest.Manifest) error } type model struct { txn store.FsTxn keys map[string]*v1manifest.KeyInfo } // New returns a object implemented Backend func New(txn store.FsTxn, keys map[string]*v1manifest.KeyInfo) Backend { return &model{txn, keys} } // Grant implements Backend func (m *model) Grant(id, name string, key *v1manifest.KeyInfo) error { initTime := time.Now() keyID, err := key.ID() if err != nil { return err } return utils.RetryUntil(func() error { var indexFileVersion *v1manifest.FileVersion if err := m.updateIndexManifest(initTime, func(im *v1manifest.Manifest) (*v1manifest.Manifest, error) { signed := im.Signed.(*v1manifest.Index) for oid, owner := range signed.Owners { if oid == id { return nil, errors.Errorf("owner %s exists", id) } for kid := range owner.Keys { if kid == keyID { return nil, errors.Errorf("key %s exists", keyID) } } } signed.Owners[id] = v1manifest.Owner{ Name: name, Keys: map[string]*v1manifest.KeyInfo{ keyID: key, }, 
// TODO: support configable threshold Threshold: 1, } indexFileVersion = &v1manifest.FileVersion{Version: signed.Version + 1} return im, nil }); err != nil { return err } indexFi, err := m.txn.Stat(fmt.Sprintf("%d.index.json", indexFileVersion.Version)) if err != nil { return err } indexFileVersion.Length = uint(indexFi.Size()) if err := m.updateSnapshotManifest(initTime, func(om *v1manifest.Manifest) *v1manifest.Manifest { signed := om.Signed.(*v1manifest.Snapshot) if indexFileVersion != nil { signed.Meta[v1manifest.ManifestURLIndex] = *indexFileVersion } return om }); err != nil { return err } // Update timestamp.json and signature if err := m.updateTimestampManifest(initTime); err != nil { return err } return m.txn.Commit() }, func(err error) bool { return err == store.ErrorFsCommitConflict && m.txn.ResetManifest() == nil }) } // Rotate implements Backend func (m *model) Rotate(manifest *v1manifest.Manifest) error { initTime := time.Now() root, ok := manifest.Signed.(*v1manifest.Root) if !ok { return ErrorWrongManifestType } return utils.RetryUntil(func() error { rm, err := m.readRootManifest() if err != nil { return err } if err := verifyRootManifest(rm, manifest); err != nil { return err } // write new 'root.json' file with version prefix manifestFilename := fmt.Sprintf("%d.root.json", root.Version) if err := m.txn.WriteManifest(manifestFilename, manifest); err != nil { return err } /* not yet update the 'root.json' without version prefix, as we don't * have a '1.root.json', so the 'root.json' is playing the role of initial * '1.root.json', clients are updating to the latest 'n.root.json' no * matter older ones are expired or not * maybe we could update the 'root.json' some day when we have many many * versions of root.json available and the updating process from old clients * are causing performance issues */ // if err := m.txn.WriteManifest("root.json", manifest); err != nil { // return err // } fi, err := m.txn.Stat(manifestFilename) if err != nil { return 
err } if err := m.updateSnapshotManifest(initTime, func(om *v1manifest.Manifest) *v1manifest.Manifest { signed := om.Signed.(*v1manifest.Snapshot) signed.Meta[v1manifest.ManifestURLRoot] = v1manifest.FileVersion{ Version: root.Version, Length: uint(fi.Size()), } return om }); err != nil { return err } // Update timestamp.json and signature if err := m.updateTimestampManifest(initTime); err != nil { return err } return m.txn.Commit() }, func(err error) bool { return err == store.ErrorFsCommitConflict && m.txn.ResetManifest() == nil }) } // Publish implements Backend func (m *model) Publish(manifest *v1manifest.Manifest, info ComponentInfo) error { signed := manifest.Signed.(*v1manifest.Component) initTime := time.Now() pf := func() error { // Write the component manifest (component.json) if err := m.updateComponentManifest(manifest); err != nil { return err } // Update snapshot.json and signature fi, err := m.txn.Stat(fmt.Sprintf("%d.%s.json", signed.Version, signed.ID)) if err != nil { return err } var indexFileVersion *v1manifest.FileVersion var owner *v1manifest.Owner if err := m.updateIndexManifest(initTime, func(im *v1manifest.Manifest) (*v1manifest.Manifest, error) { // We only update index.json when it's a new component // or the yanked, standalone, hidden fields changed, // or the owner of component changed var ( compItem v1manifest.ComponentItem compExist bool ) componentName := signed.ID signed := im.Signed.(*v1manifest.Index) if compItem, compExist = signed.Components[componentName]; compExist { // Find the owner of target component var o v1manifest.Owner if info.OwnerID() != "" { o = signed.Owners[info.OwnerID()] } else { o = signed.Owners[compItem.Owner] } owner = &o if info.Yanked() == nil && info.Hidden() == nil && info.Standalone() == nil && info.OwnerID() == "" { // No changes on index.json return nil, nil } } else { var ownerID string // The component is a new component, so the owner is whoever first create it. 
for _, sk := range manifest.Signatures { if ownerID, owner = findKeyOwnerFromIndex(signed, sk.KeyID); owner != nil { break } } compItem = v1manifest.ComponentItem{ Owner: ownerID, URL: fmt.Sprintf("/%s.json", componentName), } } if info.Yanked() != nil { compItem.Yanked = *info.Yanked() } if info.Hidden() != nil { compItem.Hidden = *info.Hidden() } if info.Standalone() != nil { compItem.Standalone = *info.Standalone() } if info.OwnerID() != "" { compItem.Owner = info.OwnerID() } signed.Components[componentName] = compItem indexFileVersion = &v1manifest.FileVersion{Version: signed.Version + 1} return im, nil }); err != nil { return err } if err := verifyComponentManifest(owner, manifest); err != nil { return err } if indexFileVersion != nil { indexFi, err := m.txn.Stat(fmt.Sprintf("%d.index.json", indexFileVersion.Version)) if err != nil { return err } indexFileVersion.Length = uint(indexFi.Size()) } if err := m.updateSnapshotManifest(initTime, func(om *v1manifest.Manifest) *v1manifest.Manifest { componentName := signed.ID manifestVersion := signed.Version signed := om.Signed.(*v1manifest.Snapshot) if indexFileVersion != nil { signed.Meta[v1manifest.ManifestURLIndex] = *indexFileVersion } signed.Meta[fmt.Sprintf("/%s.json", componentName)] = v1manifest.FileVersion{ Version: manifestVersion, Length: uint(fi.Size()), } return om }); err != nil { return err } // Update timestamp.json and signature if err := m.updateTimestampManifest(initTime); err != nil { return err } if info.Filename() != "" { if err := m.checkAndWrite(signed, info); err != nil { return err } if signed.ID == version.TiUPVerName { if err := m.copyTiUP(info.Filename()); err != nil { return err } } } return m.txn.Commit() } return utils.RetryUntil(pf, func(err error) bool { return err == store.ErrorFsCommitConflict && m.txn.ResetManifest() == nil }) } func (m *model) copyTiUP(origin string) error { xs := strings.Split(origin, "-") if len(xs) < 4 { return ErrorWrongFileName } // convert // 
`tiup-${version}-linux-arm64.tar.gz` -> `tiup-linux-arm64.tar.gz` // `tiup-v1.4.0-darwin-amd64.tar.gz` -> `tiup-darwin-amd64.tar.gz` // `tiup-v1.4.0-r13-gcd19b75+staging-darwin-amd64.tar.gz` -> `tiup-darwin-amd64.tar.gz` tiupTar := strings.Join(append(xs[:1], xs[len(xs)-2:]...), "-") reader, err := m.txn.Read(origin) if err != nil { return err } defer reader.Close() return m.txn.Write(tiupTar, reader) } func (m *model) checkAndWrite(manifest *v1manifest.Component, info ComponentData) error { fname := info.Filename() for _, plat := range manifest.Platforms { for _, vi := range plat { if vi.URL[1:] == fname { if err := m.txn.Write(fname, info); err != nil { return err } reader, err := m.txn.Read(fname) if err != nil { return err } defer reader.Close() if err := utils.CheckSHA256(reader, vi.Hashes["sha256"]); err == nil { return nil } return ErrorWrongChecksum } } } return ErrorWrongFileName } func findKeyOwnerFromIndex(signed *v1manifest.Index, keyID string) (string, *v1manifest.Owner) { for on := range signed.Owners { for k := range signed.Owners[on].Keys { if k == keyID { o := signed.Owners[on] return on, &o } } } return "", nil } func (m *model) updateComponentManifest(manifest *v1manifest.Manifest) error { signed := manifest.Signed.(*v1manifest.Component) snap, err := m.readSnapshotManifest() if err != nil { return err } snapSigned := snap.Signed.(*v1manifest.Snapshot) lastVersion := snapSigned.Meta["/"+signed.Filename()].Version if signed.Version != lastVersion+1 { zap.L().Debug( "Component version not expected", zap.Uint("expected", lastVersion+1), zap.Uint("got", signed.Version), ) return ErrorConflict } return m.txn.WriteManifest(fmt.Sprintf("%d.%s.json", signed.Version, signed.ID), manifest) } func (m *model) updateIndexManifest(initTime time.Time, f func(*v1manifest.Manifest) (*v1manifest.Manifest, error)) error { snap, err := m.readSnapshotManifest() if err != nil { return err } snapSigned := snap.Signed.(*v1manifest.Snapshot) lastVersion := 
snapSigned.Meta[v1manifest.ManifestURLIndex].Version last, err := m.txn.ReadManifest(fmt.Sprintf("%d.index.json", lastVersion), &v1manifest.Index{}) if err != nil { return err } manifest, err := f(last) if err != nil { return err } if manifest == nil { return nil } signed := manifest.Signed.(*v1manifest.Index) v1manifest.RenewManifest(signed, initTime) manifest.Signatures, err = m.sign(manifest.Signed) if err != nil { return err } return m.txn.WriteManifest(fmt.Sprintf("%d.index.json", signed.Version), manifest) } func (m *model) updateSnapshotManifest(initTime time.Time, f func(*v1manifest.Manifest) *v1manifest.Manifest) error { last, err := m.txn.ReadManifest(v1manifest.ManifestFilenameSnapshot, &v1manifest.Snapshot{}) if err != nil { return err } manifest := f(last) if manifest == nil { return nil } v1manifest.RenewManifest(manifest.Signed, initTime) manifest.Signatures, err = m.sign(manifest.Signed) if err != nil { return err } return m.txn.WriteManifest(v1manifest.ManifestFilenameSnapshot, manifest) } // readSnapshotManifest returns snapshot.json func (m *model) readSnapshotManifest() (*v1manifest.Manifest, error) { return m.txn.ReadManifest(v1manifest.ManifestFilenameSnapshot, &v1manifest.Snapshot{}) } // readRootManifest returns the latest root.json func (m *model) readRootManifest() (*v1manifest.Manifest, error) { root, err := m.txn.ReadManifest(v1manifest.ManifestFilenameRoot, &v1manifest.Root{}) if err != nil { return root, err } for { file := fmt.Sprintf("%d.%s", root.Signed.Base().Version+1, root.Signed.Filename()) last, err := m.txn.ReadManifest(file, &v1manifest.Root{}) if err != nil { return root, nil } root = last } } func (m *model) updateTimestampManifest(initTime time.Time) error { fi, err := m.txn.Stat(v1manifest.ManifestFilenameSnapshot) if err != nil { return err } reader, err := m.txn.Read(v1manifest.ManifestFilenameSnapshot) if err != nil { return err } sha256, err := utils.SHA256(reader) if err != nil { reader.Close() return err } 
reader.Close() manifest, err := m.txn.ReadManifest(v1manifest.ManifestFilenameTimestamp, &v1manifest.Timestamp{}) if err != nil { return err } signed := manifest.Signed.(*v1manifest.Timestamp) signed.Meta[v1manifest.ManifestURLSnapshot] = v1manifest.FileHash{ Hashes: map[string]string{ v1manifest.SHA256: sha256, }, Length: uint(fi.Size()), } v1manifest.RenewManifest(manifest.Signed, initTime) manifest.Signatures, err = m.sign(manifest.Signed) if err != nil { return err } return m.txn.WriteManifest(v1manifest.ManifestFilenameTimestamp, manifest) } func (m *model) sign(signed v1manifest.ValidManifest) ([]v1manifest.Signature, error) { payload, err := cjson.Marshal(signed) if err != nil { return nil, err } rm, err := m.readRootManifest() if err != nil { return nil, err } root := rm.Signed.(*v1manifest.Root) signs := []v1manifest.Signature{} for _, pubKey := range root.Roles[signed.Base().Ty].Keys { id, err := pubKey.ID() if err != nil { return nil, err } privKey := m.keys[id] if privKey == nil { return nil, ErrorMissingKey } sign, err := privKey.Signature(payload) if err != nil { return nil, errors.Trace(err) } signs = append(signs, v1manifest.Signature{ KeyID: id, Sig: sign, }) } return signs, nil } func verifyComponentManifest(owner *v1manifest.Owner, m *v1manifest.Manifest) error { if owner == nil { return ErrorMissingOwner } payload, err := cjson.Marshal(m.Signed) if err != nil { return err } for _, s := range m.Signatures { k := owner.Keys[s.KeyID] if k == nil { continue } if err := k.Verify(payload, s.Sig); err == nil { return nil } } return ErrorWrongSignature } func verifyRootManifest(oldM *v1manifest.Manifest, newM *v1manifest.Manifest) error { newRoot := newM.Signed.(*v1manifest.Root) newKeys := set.NewStringSet() payload, err := cjson.Marshal(newM.Signed) if err != nil { return err } for _, s := range newM.Signatures { id := s.KeyID k := newRoot.Roles[v1manifest.ManifestTypeRoot].Keys[id] if err := k.Verify(payload, s.Sig); err == nil { 
newKeys.Insert(s.KeyID) } } oldRoot := oldM.Signed.(*v1manifest.Root) oldKeys := set.NewStringSet() for id := range oldRoot.Roles[v1manifest.ManifestTypeRoot].Keys { oldKeys.Insert(id) } if len(oldKeys.Intersection(newKeys).Slice()) < int(oldRoot.Roles[v1manifest.ManifestTypeRoot].Threshold) { return errors.Annotatef(ErrorWrongSignature, "need %d valid signatures, only got %d", oldRoot.Roles[v1manifest.ManifestTypeRoot].Threshold, len(oldKeys.Intersection(newKeys).Slice()), ) } if newRoot.Version != oldRoot.Version+1 { return errors.Annotatef(ErrorWrongManifestVersion, "expect %d, got %d", oldRoot.Version+1, newRoot.Version) } return nil } tiup-1.16.3/pkg/repository/model/publish.go000066400000000000000000000032651505422223000206610ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package model import "io" // ComponentData is used to represent the tarball type ComponentData interface { io.Reader // Filename is the name of tarball Filename() string } // ComponentInfo is used to update component type ComponentInfo interface { ComponentData Standalone() *bool Yanked() *bool Hidden() *bool OwnerID() string } // PublishInfo implements ComponentInfo type PublishInfo struct { ComponentData Stand *bool Yank *bool Hide *bool Owner string } // TarInfo implements ComponentData type TarInfo struct { io.Reader Name string } // Filename implements ComponentData func (ti *TarInfo) Filename() string { return ti.Name } // Filename implements ComponentData func (i *PublishInfo) Filename() string { if i.ComponentData == nil { return "" } return i.ComponentData.Filename() } // Standalone implements ComponentInfo func (i *PublishInfo) Standalone() *bool { return i.Stand } // Yanked implements ComponentInfo func (i *PublishInfo) Yanked() *bool { return i.Yank } // Hidden implements ComponentInfo func (i *PublishInfo) Hidden() *bool { return i.Hide } // OwnerID implements ComponentInfo func (i *PublishInfo) OwnerID() string { return i.Owner } tiup-1.16.3/pkg/repository/progress.go000066400000000000000000000032151505422223000177520ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package repository import ( "fmt" "github.com/cheggaaa/pb/v3" ) // DisableProgress implement the DownloadProgress interface and disable download progress type DisableProgress struct{} // Start implement the DownloadProgress interface func (d DisableProgress) Start(url string, size int64) {} // SetCurrent implement the DownloadProgress interface func (d DisableProgress) SetCurrent(size int64) {} // Finish implement the DownloadProgress interface func (d DisableProgress) Finish() {} // ProgressBar implement the DownloadProgress interface with download progress type ProgressBar struct { bar *pb.ProgressBar size int64 } // Start implement the DownloadProgress interface func (p *ProgressBar) Start(url string, size int64) { p.size = size p.bar = pb.Start64(size) p.bar.Set(pb.Bytes, true) p.bar.SetTemplateString(fmt.Sprintf(`download %s {{counters . }} {{percent . }} {{speed . "%%s/s" "? MiB/s"}}`, url)) } // SetCurrent implement the DownloadProgress interface func (p *ProgressBar) SetCurrent(size int64) { p.bar.SetCurrent(size) } // Finish implement the DownloadProgress interface func (p *ProgressBar) Finish() { p.bar.Finish() } tiup-1.16.3/pkg/repository/progress_test.go000066400000000000000000000013451505422223000210130ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package repository import ( "testing" ) func TestProgress(t *testing.T) { ps := []DownloadProgress{ DisableProgress{}, &ProgressBar{}, } for _, p := range ps { p.Start("x", 10) p.SetCurrent(5) p.Finish() } } tiup-1.16.3/pkg/repository/repository.go000066400000000000000000000043311505422223000203250ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package repository import ( "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" ) // Repository represents a local components repository that mirrored the remote Repository(either filesystem or HTTP server). 
type Repository interface { Mirror() Mirror WithOptions(opts Options) Repository UpdateComponents(specs []ComponentSpec) error ResolveComponentVersion(id, constraint string) (utils.Version, error) BinaryPath(installPath string, componentID string, ver string) (string, error) DownloadTiUP(targetDir string) error DownloadComponent(item *v1manifest.VersionItem, target string) error LocalLoadManifest(index *v1manifest.Index) (*v1manifest.Manifest, bool, error) LocalLoadComponentManifest(component *v1manifest.ComponentItem, filename string) (*v1manifest.Component, error) LocalComponentManifest(id string, withYanked bool) (com *v1manifest.Component, err error) LocalComponentVersion(id, ver string, includeYanked bool) (*v1manifest.VersionItem, error) LocalComponentInstalled(component, version string) (bool, error) GetComponentManifest(id string, withYanked bool) (com *v1manifest.Component, err error) FetchIndexManifest() (index *v1manifest.Index, err error) FetchRootManifest() (root *v1manifest.Root, err error) PurgeTimestamp() UpdateComponentManifests() error LatestStableVersion(id string, withYanked bool) (utils.Version, *v1manifest.VersionItem, error) LatestNightlyVersion(id string) (utils.Version, *v1manifest.VersionItem, error) ComponentVersion(id, ver string, includeYanked bool) (*v1manifest.VersionItem, error) } // Options represents options for a repository type Options struct { GOOS string GOARCH string DisableDecompress bool } tiup-1.16.3/pkg/repository/store/000077500000000000000000000000001505422223000167125ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/store/local.go000066400000000000000000000032411505422223000203330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package store import ( "os" "path" "time" "github.com/gofrs/flock" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/utils" ) type localStore struct { root string upstream string flock *flock.Flock } func newLocalStore(root, upstream string) *localStore { return &localStore{ root: root, upstream: upstream, flock: flock.New(path.Join(root, "lock")), } } // Begin implements the Store func (s *localStore) Begin() (FsTxn, error) { return newLocalTxn(s) } // Returns the last modify time func (s *localStore) last(filename string) (*time.Time, error) { fp := path.Join(s.root, filename) if utils.IsNotExist(fp) { return nil, nil } fi, err := os.Stat(fp) if err != nil { return nil, errors.Annotate(err, "Stat file") } mt := fi.ModTime() return &mt, nil } func (s *localStore) path(filename string) string { return path.Join(s.root, filename) } func (s *localStore) lock() error { return s.flock.Lock() } func (s *localStore) unlock() { // The unlock operation must success, otherwise the later operation will stuck if err := s.flock.Unlock(); err != nil { panic(errors.Annotate(err, "unlock filesystem failed")) } } tiup-1.16.3/pkg/repository/store/store.go000066400000000000000000000024651505422223000204040ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package store import ( "io" "os" "github.com/pingcap/tiup/pkg/repository/v1manifest" ) // Store represents the storage level type Store interface { Begin() (FsTxn, error) } // FsTxn represent the transaction session of file operations type FsTxn interface { Write(filename string, reader io.Reader) error Read(filename string) (io.ReadCloser, error) WriteManifest(filename string, manifest *v1manifest.Manifest) error ReadManifest(filename string, role v1manifest.ValidManifest) (*v1manifest.Manifest, error) Stat(filename string) (os.FileInfo, error) // ResetManifest should reset the manifest state ResetManifest() error Commit() error Rollback() error } // New returns a Store, currently only qcloud supported func New(root string, upstream string) Store { return newLocalStore(root, upstream) } tiup-1.16.3/pkg/repository/store/store_test.go000066400000000000000000000055741505422223000214470ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package store import ( "os" "testing" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/stretchr/testify/assert" ) func TestEmptyCommit(t *testing.T) { root, err := os.MkdirTemp("", "") assert.Nil(t, err) defer os.RemoveAll(root) store := New(root, "") txn, err := store.Begin() assert.Nil(t, err) err = txn.Commit() assert.Nil(t, err) } func TestSingleWrite(t *testing.T) { root, err := os.MkdirTemp("", "") assert.Nil(t, err) defer os.RemoveAll(root) store := New(root, "") txn, err := store.Begin() assert.Nil(t, err) err = txn.WriteManifest("test.json", &v1manifest.Manifest{ Signed: &v1manifest.Timestamp{ Meta: map[string]v1manifest.FileHash{ "test": { Length: 9527, }, }, }, }) assert.Nil(t, err) m, err := txn.ReadManifest("test.json", &v1manifest.Timestamp{}) assert.Nil(t, err) assert.Equal(t, uint(9527), m.Signed.(*v1manifest.Timestamp).Meta["test"].Length) } func TestConflict(t *testing.T) { root, err := os.MkdirTemp("", "") assert.Nil(t, err) defer os.RemoveAll(root) store := New(root, "") txn1, err := store.Begin() assert.Nil(t, err) txn2, err := store.Begin() assert.Nil(t, err) test := &v1manifest.Manifest{ Signed: &v1manifest.Timestamp{ Meta: map[string]v1manifest.FileHash{ "test": { Length: 9527, }, }, }, } err = txn1.WriteManifest("test.json", test) assert.Nil(t, err) m, err := txn1.ReadManifest("test.json", &v1manifest.Timestamp{}) assert.Nil(t, err) assert.Equal(t, uint(9527), m.Signed.(*v1manifest.Timestamp).Meta["test"].Length) err = txn2.WriteManifest("test.json", test) assert.Nil(t, err) m, err = txn2.ReadManifest("test.json", &v1manifest.Timestamp{}) assert.Nil(t, err) assert.Equal(t, uint(9527), m.Signed.(*v1manifest.Timestamp).Meta["test"].Length) err = txn1.Commit() assert.Nil(t, err) err = txn2.Commit() assert.NotNil(t, err) } func TestUpstream(t *testing.T) { root, err := os.MkdirTemp("", "") assert.Nil(t, err) defer os.RemoveAll(root) txn, err := New(root, "").Begin() assert.Nil(t, err) _, err = txn.ReadManifest("timestamp.json", 
&v1manifest.Timestamp{}) assert.NotNil(t, err) txn, err = New(root, "https://tiup-mirrors.pingcap.com").Begin() assert.Nil(t, err) m, err := txn.ReadManifest("timestamp.json", &v1manifest.Timestamp{}) assert.Nil(t, err) assert.NotEmpty(t, m.Signed.(*v1manifest.Timestamp).Meta["/snapshot.json"].Hashes) } tiup-1.16.3/pkg/repository/store/sync.go000066400000000000000000000034601505422223000202200ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package store import ( "fmt" "os" "os/exec" "path" "time" "github.com/pingcap/tiup/pkg/utils" ) // Syncer sync diff files to target type Syncer interface { Sync(srcDir string) error } type combinedSyncer struct { syncers []Syncer } func (s *combinedSyncer) Sync(srcDir string) error { for _, sy := range s.syncers { if err := sy.Sync(srcDir); err != nil { return err } } return nil } func combine(syncers ...Syncer) Syncer { return &combinedSyncer{syncers} } type fsSyncer struct { root string } func newFsSyncer(root string) Syncer { return &fsSyncer{root} } func (s *fsSyncer) Sync(srcDir string) error { unix := time.Now().UnixNano() dstDir := path.Join(s.root, fmt.Sprintf("commit-%d", unix)) if err := utils.MkdirAll(dstDir, 0755); err != nil { return err } files, err := os.ReadDir(srcDir) if err != nil { return err } for _, f := range files { if err := utils.Copy(path.Join(srcDir, f.Name()), path.Join(dstDir, f.Name())); err != nil { return err } } return nil } type externalSyncer struct { script string } func newExternalSyncer(scriptPath string) 
Syncer { return &externalSyncer{scriptPath} } func (s *externalSyncer) Sync(srcDir string) error { cmd := exec.Command(s.script, srcDir) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } tiup-1.16.3/pkg/repository/store/txn.go000066400000000000000000000201501505422223000200500ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package store import ( "bytes" "context" "fmt" "io" "os" "path" "time" cjson "github.com/gibson042/canonicaljson-go" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" ) var ( // ErrorFsCommitConflict indicates concurrent writing file ErrorFsCommitConflict = errors.New("conflict on fs commit") ) // The localTxn is used to implement a filesystem transaction, the basic principle is to // record the timestamp before access any manifest file, and when writing them back, check // if the origin files' timestamp is newer than recorded one, if so, a conflict is detected. // // To get current timestamp: // 1. check if there is timestamp.json in root directory, if so, return it's modify time // 2. check if there is any file in root directory, if so, return the newest one's modify time // 3. return the root directory's modify time // If the timestamp.json is in root directory, we always make sure it's newer than other files // So current timestamp is the modify time of the newest file in root directory // // To read a manifest file: // 1. 
get current timestamp and record it // 2. read the file from root directory // // To write a manifest file: // 1. get current timestamp and record it // 2. write the manifest file to temporary directory // // To commit a localTxn: // 1. for every accessed file, get the recorded timestamp // 2. for every accessed file, get the current modify time of their origin file in root directory // 3. check if the origin file is newer than recorded timestamp, if so, there must be conflict // 4. copy every file in temporary directory to root directory, if there is a timestamp.json in // temporary directory, it should be the last one to copy type localTxn struct { syncer Syncer store *localStore root string accessed map[string]*time.Time } func newLocalTxn(store *localStore) (*localTxn, error) { syncer := newFsSyncer(path.Join(store.root, "commits")) if script := os.Getenv(localdata.EnvNameMirrorSyncScript); script != "" { syncer = combine(syncer, newExternalSyncer(script)) } root, err := os.MkdirTemp(os.Getenv(localdata.EnvNameComponentDataDir), "tiup-commit-*") if err != nil { return nil, err } txn := &localTxn{ syncer: syncer, store: store, root: root, accessed: make(map[string]*time.Time), } return txn, nil } // Write implements FsTxn func (t *localTxn) Write(filename string, reader io.Reader) error { filepath := path.Join(t.root, filename) file, err := os.Create(filepath) if err != nil { return errors.Annotate(err, "create file") } defer file.Close() _, err = io.Copy(file, reader) return err } // Read implements FsTxn func (t *localTxn) Read(filename string) (io.ReadCloser, error) { filepath := t.store.path(filename) if utils.IsExist(path.Join(t.root, filename)) { filepath = path.Join(t.root, filename) } return os.Open(filepath) } func (t *localTxn) WriteManifest(filename string, manifest *v1manifest.Manifest) error { if err := t.access(filename); err != nil { return err } filepath := path.Join(t.root, filename) file, err := os.Create(filepath) if err != nil { return 
errors.Annotate(err, "create file") } defer file.Close() bytes, err := cjson.Marshal(manifest) if err != nil { return errors.Annotate(err, "marshal manifest") } if _, err = file.Write(bytes); err != nil { return errors.Annotate(err, "write file") } if err = file.Close(); err != nil { return errors.Annotate(err, "flush file content") } fi, err := os.Stat(filepath) if err != nil { return errors.Annotate(err, "stat file") } // The modify time must increase if !t.first(filename).Before(fi.ModTime()) { mt := time.Unix(0, t.first(filename).UnixNano()+1) return os.Chtimes(filepath, mt, mt) } return nil } func (t *localTxn) ReadManifest(filename string, role v1manifest.ValidManifest) (*v1manifest.Manifest, error) { if err := t.access(filename); err != nil { return nil, err } filepath := t.store.path(filename) if utils.IsExist(path.Join(t.root, filename)) { filepath = path.Join(t.root, filename) } var wc io.Reader file, err := os.Open(filepath) switch { case err == nil: wc = file defer file.Close() case os.IsNotExist(err) && t.store.upstream != "": url := fmt.Sprintf("%s/%s", t.store.upstream, filename) client := utils.NewHTTPClient(time.Minute, nil) body, err := client.Get(context.TODO(), url) if err != nil { return nil, errors.Annotatef(err, "fetch %s", url) } wc = bytes.NewBuffer(body) default: return nil, errors.Annotatef(err, "error on read manifest: %s, upstream %s", err.Error(), t.store.upstream) } return v1manifest.ReadNoVerify(wc, role) } func (t *localTxn) ResetManifest() error { for file := range t.accessed { fp := path.Join(t.root, file) if utils.IsExist(fp) { if err := os.Remove(fp); err != nil { return err } } } t.accessed = make(map[string]*time.Time) return nil } func (t *localTxn) Stat(filename string) (os.FileInfo, error) { if err := t.access(filename); err != nil { return nil, err } filepath := t.store.path(filename) if utils.IsExist(path.Join(t.root, filename)) { filepath = path.Join(t.root, filename) } return os.Stat(filepath) } func (t *localTxn) 
Commit() error { if err := t.store.lock(); err != nil { return err } defer t.store.unlock() if err := t.checkConflict(); err != nil { return err } files, err := os.ReadDir(t.root) if err != nil { return err } hasTimestamp := false for _, f := range files { // Make sure modify time of the timestamp.json is the newest if f.Name() == v1manifest.ManifestFilenameTimestamp { hasTimestamp = true continue } if err := utils.Copy(path.Join(t.root, f.Name()), t.store.path(f.Name())); err != nil { return err } } if hasTimestamp { if err := utils.Copy(path.Join(t.root, v1manifest.ManifestFilenameTimestamp), t.store.path(v1manifest.ManifestFilenameTimestamp)); err != nil { return err } } if err := t.syncer.Sync(t.root); err != nil { return err } return t.release() } func (t *localTxn) Rollback() error { return t.release() } func (t *localTxn) checkConflict() error { for file := range t.accessed { mt, err := t.store.last(file) if err != nil { return err } if mt != nil && mt.After(*t.first(file)) { return ErrorFsCommitConflict } } return nil } func (t *localTxn) access(filename string) error { // Use the earliest time if t.accessed[filename] != nil { return nil } // Use the modify time of timestamp.json timestamp := t.store.path(v1manifest.ManifestFilenameTimestamp) fi, err := os.Stat(timestamp) if err == nil { mt := fi.ModTime() t.accessed[filename] = &mt } else if !os.IsNotExist(err) { return errors.Annotatef(err, "read %s: %s", v1manifest.ManifestFilenameTimestamp, timestamp) } // Use the newest file in t.store.root files, err := os.ReadDir(t.store.root) if err != nil { return errors.Annotatef(err, "read store root: %s", t.store.root) } for _, f := range files { fi, err := f.Info() if err != nil { return err } if t.accessed[filename] == nil || t.accessed[filename].Before(fi.ModTime()) { mt := fi.ModTime() t.accessed[filename] = &mt } } if t.accessed[filename] != nil { return nil } // Use the mod time of t.store.root fi, err = os.Stat(t.store.root) if err != nil { return 
errors.Annotatef(err, "read store root: %s", t.store.root) } mt := fi.ModTime() t.accessed[filename] = &mt return nil } // Returns the first access time func (t *localTxn) first(filename string) *time.Time { return t.accessed[filename] } func (t *localTxn) release() error { return os.RemoveAll(t.root) } tiup-1.16.3/pkg/repository/testdata/000077500000000000000000000000001505422223000173675ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/testdata/manifests/000077500000000000000000000000001505422223000213605ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/testdata/manifests/1.alertmanager.json000066400000000000000000000036301505422223000250560ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"bUXHr7pS8vShDcgj7fcCzXlqMzH3dVt84DSDszNYbgmG2dgHDAVWJM6+Wazo2bATX7zuP+o3FeGTJyE2/8VIIowZfuZnBZOwdRUyHF5QUgPHzlEMs4VRhjDZoiI4XEdOnVl1P67ohqDQO5SSmvwUpvsEr+MtHI+kHDYo3vxmzertI2voUhu8EZlvG8ovsexwieXyzqYKzHxYuNwJb4ojUN6t+oG3wAYgbjTiyAYJVjQs7MyrG/yQbGgif0bmjW0fo3wuO2gGKJ91foesBo15vBawcs6agal2U86DBRUN/YcQROATscSadjBvg1ir61cWP/IBbxiLQ5HBprnBd2/XVg=="}],"signed":{"_type":"component","description":"Prometheus 
alertmanager","expires":"2031-10-21T08:19:55+08:00","id":"alertmanager","name":"alertmanager","platforms":{"darwin/amd64":{"v0.17.0":{"dependencies":null,"entry":"alertmanager/alertmanager","hashes":{"sha256":"d1925332c107b6e2843855d90127fd19289066acb32f76b8e62ce9daa22295e0","sha512":"931b17cc2ae10e65b2ef435bd960b0c3a3a6832416f67b05c702af891afc07dbc983af5798316d54e48dcfada03036ab1d284c7e48e08a55fd3e7e97c1e7f639"},"length":23576647,"released":"2020-05-20T04:46:52+08:00","url":"/alertmanager-v0.17.0-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.17.0":{"dependencies":null,"entry":"alertmanager/alertmanager","hashes":{"sha256":"e917117d14ee55ee8eab883eff8c5fc97675d4e65108f47f2a193f39efc65116","sha512":"c102ce634678374d56d76e497dc141a76366e760f8ec761fd7b99589cb03411fb368c4b4a69c47f154f64d84a593bcc1ea020ddda54e9d961adfb3617790ef0c"},"length":23631933,"released":"2020-05-20T04:46:52+08:00","url":"/alertmanager-v0.17.0-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v0.17.0":{"dependencies":null,"entry":"alertmanager/alertmanager","hashes":{"sha256":"9270d671ea4745ff9fe69cc4ee80d56b3f3a755c0e1b8c4ff83ee770bc24bb32","sha512":"cef80d1cd3e1bb34f766dc36e1ba9ec2dbcef42576204484038b8a37146754a47023e7230ac665739f62bfe1c4e2d6f149ae13402baa1d5aad45bc5bfd7525df"},"length":22115612,"released":"2020-05-20T04:46:52+08:00","url":"/alertmanager-v0.17.0-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.bench.json000066400000000000000000000042671505422223000235020ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"parcypesicUhk/fJ6xt0PsRJFso3QigfognfMCbDHqiq1nsHnNvEEPXpqBJNTrheUJ1a4SpzcbiLEBekNyLCazg3pKZBuQtxCfhI13PaevYKLeS6AuqAskVDzc5my0DmgVXD7vhT/zRSDMx6xDOmygPIg5oYT3Aoj5mXSi2+PX1nbXZMaqVLeq9FpC88eDTvS6zEf3HLTH9kd39jHi7HOp72smhb/P0c3jBWWkLrA4fwCey6uQDvDqLlCzEkYOID6upuiwYsH7vTdDJQHOlBmj3ZVaDNVqMvlX+ZUSxsBoIXXIvQzAzrvnyhF/vA89AQQh
E+MTj61bsqZgoE6GwmFg=="}],"signed":{"_type":"component","description":"Benchmark database with different workloads","expires":"2031-10-21T08:19:55+08:00","id":"bench","name":"bench","platforms":{"darwin/amd64":{"v0.0.1":{"dependencies":null,"entry":"bench","hashes":{"sha256":"c4356b4aa42156ad5519a7c158a9479b950d2ef1266e2ff82a5439467c5cca09","sha512":"3d25e8c253159a50401cf3644695b63c37a0ec73ca469833b61d51fad6963b49a628c474302b2b3d98b87b3ee2eb6a7561d10c02485f4fa48245eeb033cccfc8"},"length":6272606,"released":"2020-04-13T23:24:58+08:00","url":"/bench-v0.0.1-darwin-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"bench","hashes":{"sha256":"f3a6fe3f7304f05dd6840731faa8ac467416ded4371489a5f126d478f7e90724","sha512":"4aa12504993b89cf601ddbaecea90f775bc91ff84a35d03524c9cbe81764506722548765e03b81fb020b1bc59d1df2a324aa9d2ac182089767382c5ba258eb61"},"length":6287102,"released":"2020-05-13T12:04:07+08:00","url":"/bench-v0.0.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.0.1":{"dependencies":null,"entry":"bench","hashes":{"sha256":"f3f94beb66e7ba6b47b6cbb7d45b73b62daa8d71a552d1a0061b29e6b73db003","sha512":"aa85ead4cfdd2b89997883eab70ff0040d9909ec01a12f68ca3d789f059f6a77d76ec2d33bdbb0700facc2ca762c202758e2cf36611a08280e837ea8a7ca98bf"},"length":6304225,"released":"2020-04-13T23:24:58+08:00","url":"/bench-v0.0.1-linux-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"bench","hashes":{"sha256":"46656f88408b42f47a7a991170cff53ecea42980cba526165e07956621667f14","sha512":"1fdf9f7d8bec01d130dd9c203c1b0b476a5b05c146bf483085e98e5c37eb22064725c61c8e80361a592f187d96585169032e92111bf38ef4586f9ad8bad4da01"},"length":6314657,"released":"2020-05-13T12:04:07+08:00","url":"/bench-v0.0.2-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.blackbox_exporter.json000066400000000000000000000037151505422223000261350ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347d
e20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"VkxS8iLfbczVf/WaC8y9vOkSRvyQhdvxnc8xoJC1goLt5MMGm2FlE1aX66M4NT8eoB6x4XzgnqqothYsjU5ZPXY3UFbmX6Cjd7Zjlkzp3Z5vmfBsfIibZ/jPJDKjl2ewZg5Nrk5/FSUwshS8WLzqqkzfuUeTg450psSUDFjdUxTNnNsjqs0R8teKNG9/J3res1glZCS+oO9Djw55+0e5IcpF26XGAq3DxZhSID3KE3yOgGWJmnMso0YgkNaHjuIu4honIllcNMUzUV/khnPXn9VqSCXGoScAyivFDjtG5N40qM8a9bSHFQgWGOyvGzzPxB+/lE4jkHHz+Y3qfa3rZQ=="}],"signed":{"_type":"component","description":"Blackbox prober exporter","expires":"2031-10-21T08:19:55+08:00","id":"blackbox_exporter","name":"blackbox_exporter","platforms":{"darwin/amd64":{"v0.12.0":{"dependencies":null,"entry":"blackbox_exporter/blackbox_exporter","hashes":{"sha256":"2866b7ae17598a977c8dc7f87f014fc8f5ca8d3a4aee00989404eae85dde37f7","sha512":"7079e4519918148730c67d685b0ba271ff1f985d5c0bfddfb8e4f17c67f4e4b9b9c9f139a6bdd1ee51c4f73cf0fc60bd8fe18049535f7d56d97d4271d591b787"},"length":4983999,"released":"2020-05-20T04:46:08+08:00","url":"/blackbox_exporter-v0.12.0-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.12.0":{"dependencies":null,"entry":"blackbox_exporter/blackbox_exporter","hashes":{"sha256":"ddc8e6b264880dfa9373de542ffca6b26266d6fa1a13a392751249b34d525e5d","sha512":"a575ddcd50a7ef53094eb49bd3e664cc61103a9ab9be167b64cb0e7ebd942806811ffd0fcc89b5d64780f419e531f4894e7d0ac47e5130ff2aee38f75a7735e2"},"length":4950018,"released":"2020-05-20T04:46:08+08:00","url":"/blackbox_exporter-v0.12.0-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v0.12.0":{"dependencies":null,"entry":"blackbox_exporter/blackbox_exporter","hashes":{"sha256":"738343c4ed4a446438dfa04a3ff8acf0b974b39430dae7d8986115d4d8229c63","sha512":"110446e2b0a3c8bad0940a7b96601b32e17328d3231ae798355fbb8abb623e23e33150580db053b9fa5858e593b922c4aef96986cdee4e42e5a3cf409bfb61d7"},"length":4639708,"released":"2020-05-20T04:46:08+08:00","url":"/blackbox_exporter-v0.12.0-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manife
sts/1.cdc.json000066400000000000000000000065131505422223000231500ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"kkrx42WEaszY2M5aHv/lTm0E6y2yBBIz3o3x4IeM2T3Vlu2zWOEEvHYaJkrzkpvbMWhMOYeHHdq1wZWvbM653Ur+sAYYiinS5PHaJ4yNp1U+XOzyUqcbiRFuvtwwwePVW4SnkKbRHLq5xSXNLKx+i/K5F77OuwQvTrw0rdw2Bi8pD3xMsGjonL2c4DWMDi2Djrq1QISXUaSHefjMmMkkqpY0oU69CEcURryPvOebfihF0KReqFY0c1u4vbGxBJQ/FAyKqBcTo1idlxzV8B4EE+Uxl7wHULOX87ObYz4Kt5Vqx1xGoOXUlqCIv4SvCyQjagvqpFXegtowqqpvBbIdaQ=="}],"signed":{"_type":"component","description":"","expires":"2031-10-21T08:19:55+08:00","id":"cdc","name":"cdc","platforms":{"darwin/amd64":{"v4.0.0-rc.1":{"dependencies":null,"entry":"cdc","hashes":{"sha256":"897100a9a37e7d9bfe7fb95fcb51813d8c61cdca57b2e8220a068f349066a087","sha512":"25905571f28745a6e89cb168b9140cb98b574934f537103370fc0cf3ae38493f777d7791af2a97336579604bd28c591c58dc7446c8e153d73d843935cec0bd91"},"length":32075842,"released":"2020-04-29T01:14:00+08:00","url":"/cdc-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"cdc","hashes":{"sha256":"baaea5adbc589983a67dcb48c05026b41edeb285e0d2d2ebca098554a069bfb3","sha512":"b835a175e149e9be197d11da8987e3e8b5e3d7cce92d3e781c926d8d9b0f56f4a66bebb6792b08c2092fdae9a916955cff5d8c3ddf7c76454b446a475bf3f50d"},"length":32084791,"released":"2020-05-15T22:06:33+08:00","url":"/cdc-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v4.0.0-rc":{"dependencies":null,"entry":"cdc","hashes":{"sha256":"084454231ee675bdb858e39b61cd9a77f627e29e5bae889e950eb65721c729c1","sha512":"edecfc87f803ba9e446a17cbfe95f536586a7321545d28830dc0602b412e924ea8eeb179b756f82cc4357a2c8399f76be0fce931262a4d23210ec2ca46845e74"},"length":32883862,"released":"2020-05-13T17:54:45+08:00","url":"/cdc-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"cdc","hashes":{"sha256":"7377f0dc271a4f8a51c894229faeb703ea773abe4cda2e6cdbd62f3c7f9d
81f3","sha512":"6191abeb6115fe945f28616cd9c457ac1aae2c88677b5b65b7b4062f993f5ab7d674bcf7127c8438847da601aa9e6cf3b74d396400457f5cfc27e65dbd0b2b8f"},"length":33442502,"released":"2020-04-29T01:14:00+08:00","url":"/cdc-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"cdc","hashes":{"sha256":"b23b4a1e4cfb5551348737d3f800c0a0834308d278efc404d07b32caed87e5a4","sha512":"8c1a8850c0dd0c9e99b1794ac5277ecc9e3f48175f33b6d3ff0e2a098094b0b5184c8c68b16da387906275934e0bd9832a62b8f05787a7bf2df1d04faaa3f001"},"length":33456351,"released":"2020-05-15T22:06:33+08:00","url":"/cdc-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v4.0.0-rc.1":{"dependencies":null,"entry":"cdc","hashes":{"sha256":"55393917dfd796cccef684f25794171face1909c9e94450b7eae64b299989625","sha512":"a972b4a10004d8c910131232844801245c728c50b0fbc9cb51b26efa8c4b0dd9dbf8f0dd9c66bf5f2ff01e73eb2d6365423e36defeacaa3fe049f5c978d23557"},"length":31034141,"released":"2020-04-29T01:14:00+08:00","url":"/cdc-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"cdc","hashes":{"sha256":"554d7ba32fd1465345bf2ec11d51788e69e06b3a4376c67259fc010709685e8f","sha512":"bf5aa2ef8275ed55d9cc0f3012ea69d1a77adf04782186c488e95bf01ac5202cec996b95adcdf0b79ebc2655db4339bb3d4e0f31f9bb809a9a83f001e2e620b2"},"length":31043621,"released":"2020-05-15T22:06:33+08:00","url":"/cdc-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.client.json000066400000000000000000000107261505422223000236760ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"d2e8jHCvlyjpC1dtz+zmXClyBBYrqHVyTT8ecs7zmggetkphzvR+gGmTnYFXrLLAk/HhaKRItY+LNtQXTjhkrnZ4ViuGypBOeo8BLbpsj5UCDHEwvZ61oDSl1U4KidsgS53eUSzVZP4f4e2RzJquS8iRFVB+3ZJ2Qg9WejZmzoicMBnbQfShgmsjZU3ClP8dFpbXGWysve+/GAcHkiLzkfuvC0OH4Klhmtkkl+cmzIKqoXPrx+yeJndus+bOiduaM7f/qinkH2
m8G2IX9KKpZFA/fAXJYz7EOSB0Z2HPui0y1Rywo7NSb2vJx8722woZ+8+MbpFh2a3YgltY6BjdSw=="}],"signed":{"_type":"component","description":"A simple mysql client to connect TiDB","expires":"2031-10-21T08:19:55+08:00","id":"client","name":"client","platforms":{"darwin/amd64":{"v0.0.1":{"dependencies":null,"entry":"client","hashes":{"sha256":"334e3e961e301ca01494be9da81e3c45e1abc45b91fc99f0c63a72d606404b14","sha512":"18bbc5ec98b5947104068db8514be927589d672ecf80a6d5173c32fa9a081e61fdc04e2d2fffbe3c78aced843b9070d8a1e439a03310bf89ca8a2828b883aadb"},"length":8053270,"released":"2020-04-13T23:23:54+08:00","url":"/client-v0.0.1-darwin-amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"client","hashes":{"sha256":"fd53dc7c483ed5cdabe3249f51d9baea9105763cb930ef6980d61caa3ca4ae8d","sha512":"f23f2ce624c82c94576514552a5f91a596f9d7aea67a41d90d42561711aeb31bd27e5d09a00c7106fb2714385230060d59fb3b397db73139978cc22e68ac6fdf"},"length":7708670,"released":"2020-03-03T19:39:35+08:00","url":"/client-v0.0.3-darwin-amd64.tar.gz","yanked":false},"v0.0.4":{"dependencies":null,"entry":"client","hashes":{"sha256":"34bb24df7b40c362c1dfcc3737b3a18fd30c4cd884c26359cea8e4741f7b4eda","sha512":"2880f8922badc04c040cfce11016f5f189118f55f90578d78a4993f15df6a13d5f5bd4fc8afb133dc5f2e46bb96fe9c04a9c93c3e937c1025cd12d580a41a48b"},"length":8900821,"released":"2020-03-03T19:39:35+08:00","url":"/client-v0.0.4-darwin-amd64.tar.gz","yanked":false},"v0.0.5":{"dependencies":null,"entry":"client","hashes":{"sha256":"d608b2a7e9147b4fa2775f079ef69c50f724e6e3b3ec95a8fb7c4e8579aba84f","sha512":"23fe92593e61a1bf3ca3750a7666d22ce665e6c2a66c01d2ec77f19d236c23552db4e0737d279e7fb105a080c10bc0252e944af15ac1bb70f55d7b7a3f2abe50"},"length":8902204,"released":"2020-03-03T19:39:35+08:00","url":"/client-v0.0.5-darwin-amd64.tar.gz","yanked":false},"v0.0.6":{"dependencies":null,"entry":"client","hashes":{"sha256":"9bbc45f63601d39475e733c3db4aeead6c9ef6be866a5073795c4023d42d7fd9","sha512":"fca2d74384e6938a27f97252ed51bcaaf63e97
cf201d7253be46b0ce6da7e67200590c48d4130feeb665d4569ae45225696933679d0cade228b9db7e31de0b55"},"length":8050535,"released":"2020-04-13T23:23:35+08:00","url":"/client-v0.0.6-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.0.1":{"dependencies":null,"entry":"client","hashes":{"sha256":"418a9f73c5363f0638f71af6399ec05401e41fb7420d12ea07e5b5f1df052964","sha512":"768c1fe84b0e5356029ce2ed6130055285f66614bd91dff35bbfc67fcf7903980296e3a89f89bc249fe248baf85eb1faec273e844a14e585ceb56a8e8195225d"},"length":8217339,"released":"2020-04-13T23:23:54+08:00","url":"/client-v0.0.1-linux-amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"client","hashes":{"sha256":"1b7fa70e622f25265c9fe44ab2742e6b82ba5767ea471aef20732369caa59d50","sha512":"14f817b700977815e5ce07fa93eb82080ea8bafe03f97946f379d329d61eaff32316ab0774c5f67aeb9e8219608497bdfa51b24eeefe078b1421eb10f296b07c"},"length":7810891,"released":"2020-03-03T19:39:35+08:00","url":"/client-v0.0.3-linux-amd64.tar.gz","yanked":false},"v0.0.4":{"dependencies":null,"entry":"client","hashes":{"sha256":"7cf82b8191e5e59f44e10b68b4081537062e3a9dcccac5efcaa59ee5616ba92e","sha512":"fb1a6792c50f746521f1a0daef9652b4eedef55702c61f534fd8e0f33a022a6160e2b6b6d886af75a8443386473e642f53e2e782ffa4cf96e47144d225af765d"},"length":8140899,"released":"2020-03-03T19:39:35+08:00","url":"/client-v0.0.4-linux-amd64.tar.gz","yanked":false},"v0.0.5":{"dependencies":null,"entry":"client","hashes":{"sha256":"d0f6a136e5f1e86d8f0fdafb488139f9c674827429e2d43840c59f7ce0cec629","sha512":"af94bf3f712bd14659467525ddf121051f3a237075be2b4b81b4dd3e5b4ee98ba7550cf284cac73dd78a0f5ff76a6415a6a26f4da08900486e1511d388706e44"},"length":8140424,"released":"2020-03-03T19:39:35+08:00","url":"/client-v0.0.5-linux-amd64.tar.gz","yanked":false},"v0.0.6":{"dependencies":null,"entry":"client","hashes":{"sha256":"1d958c4c2c5403d0f4c5fa95b297bd50709af8624795363bce04e049b0338d76","sha512":"54e202911f31d65f9505e116cb6ba9344b35613642eabe5c10377656d4f85c0cd4dc1f978ca79c1e1
19cfa5f616db83a2c28e63d5d743b11b48631a9f9cfa123"},"length":8214831,"released":"2020-04-13T23:23:35+08:00","url":"/client-v0.0.6-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.cluster.json000066400000000000000000000544711505422223000241060ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"P/rQ5Kg/W9lGP7ugkNsfTQX+qKMGZtir1OJnaYvttxF8j4AJTdU8OPW2mxaMCTHlxqS8Karbl9+0UaNzu/dWNiA4+9irLZXHWHqBnW7vvX5MFOlrzrFAsY/NTE5jspEEbcKp+EnzzoH8QFsb74EQ1BPBqPiNHAfBtsYz32EVC9sTed7uGl9GS52JWqcfNKhqrwLZeSJhHhmJ9fhKaTpPN8g8UTBIpGJYwszfxPwnoiE8TymSM/40p/jXCLjBTCGTFrF7vmepGr2sEkMmzOWICdzjb3QANzvMGJ3DkpLdJo79EWZmyyHX4A2R0PRKRjSaQmMMMmXyOjZYCMr6oVUPBg=="}],"signed":{"_type":"component","description":"Deploy a TiDB cluster for production","expires":"2031-10-21T08:19:55+08:00","id":"cluster","name":"cluster","platforms":{"darwin/amd64":{"v0.0.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"e98ed6a36732cefac4f48c1a0ec96dbe57931e81eb0c6ca4cef63f893e13c793","sha512":"3a8d1b0f86613aeb2b6ef5d8b5996132f5cd49bd79b4191bac0ac292ae62903cadd73af34443d5c7a134cc1b1fcb63b84cf22ec8a84fd19c5b490019c529b2ae"},"length":14551928,"released":"2020-03-30T14:55:12.015501614+08:00","url":"/cluster-v0.0.1-darwin-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"eb6e45cf538f37b0b9ac60e15485c4c1ccf48d5d7a2cbeb32b444fbb5854e9dd","sha512":"8b1233a6df62f263dc0c6c3f1c7108fe31b2db24e3b807bf27e0dc387ab3fbbc12bc0c710f45de6f4fc8665372a71c5b8da1e858ddcc3b25b7b84f0eb6b47ee2"},"length":14552336,"released":"2020-03-30T16:41:17.555789598+08:00","url":"/cluster-v0.0.2-darwin-amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"2a89011307ff9bd7a2aa0cf23215d7eed65c4b6ff8ff720bedf004f3fd167907","sha512":"2335e4a50db031baf5e0e7ec2d16cb1cef5a43940ca30877a658ff71c2ad51816bf99262e0bc6b8459
fc28d31a7ee09a94865d2b2a6e520183e1609332de1518"},"length":14874230,"released":"2020-03-31T09:27:10.617959661+08:00","url":"/cluster-v0.0.3-darwin-amd64.tar.gz","yanked":false},"v0.0.4":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"2edbc3bb71abe049423959519b42370ab809ffc61b0453b113a0532b39d5cb5c","sha512":"6aab0bb34c0850ec2f92ad34a7649f26a5802e2b1edc20b4a939bb1c52c7b4dc69c1e5102a9cea56f1133d4c07bb156d2b84fc8e8654fd1d1a31dc59ac8f56fe"},"length":14877457,"released":"2020-03-31T10:18:27.520655637+08:00","url":"/cluster-v0.0.4-darwin-amd64.tar.gz","yanked":false},"v0.0.5":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"2781eb0657645749ecb098740b78b3123f108443433729cb8ce1a30246be6bc7","sha512":"9455ac70228dd452e3b299eca44d39cd262dfd7060364d89a007b20b8d9b730c07876fa6dafe343f09d217b85c52be5a0fd9b12ed2b7725ff3bdc12536c75572"},"length":14907740,"released":"2020-03-31T13:29:51.535573274+08:00","url":"/cluster-v0.0.5-darwin-amd64.tar.gz","yanked":false},"v0.0.6":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"0a279b4f1a31e5c8caab0f5d88a3704f7d920664d9b08de3ffb13ef59cb81051","sha512":"35b8bd8f97477b1bd93a8e30b5a05f16192ebe24b4e0cc176db40df4139137bec1b3653bb95d613668e5fbe1ef8aa9832f35ee05540f5df99174be355eef4144"},"length":14908182,"released":"2020-03-31T15:37:41.540288658+08:00","url":"/cluster-v0.0.6-darwin-amd64.tar.gz","yanked":false},"v0.0.7":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"49d77cddeb4c60bf73625ea8b55306e910cf8ae46388c97c3c778dcefbe391d4","sha512":"da7d80304d99fbab7d1c8417a1587aa1944dd75fb9d9212f4079c14d744e6f37318757d3d2614733ed6b88ea91aa324a6121924fb2320198c9474920bd90d258"},"length":15012698,"released":"2020-04-01T10:28:45.653773416+08:00","url":"/cluster-v0.0.7-darwin-amd64.tar.gz","yanked":false},"v0.0.8":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"377492fc1c9b2914309987d2f8c978fc813ec27f2cf4ab20ab541925008ed139","sha512":"15c8e9634105b9bab2c299231f8b106c744be70980aff3d3a6bde94c5c27cc9191ea039c
ef9bc4845b735cf7ab72f434e9aa7ee55cc269750a25fe0c2f8d69a4"},"length":15127924,"released":"2020-04-02T10:00:12.993695734+08:00","url":"/cluster-v0.0.8-darwin-amd64.tar.gz","yanked":false},"v0.0.9":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"82f2efebdc3b3daf9e09d3a624bb0fbf1df4548fa2c8c41719f89f01393775f8","sha512":"3146cb7efebaf1af931bad1f7b1e39daff0963b03e5ba20d2873a6df500416d9407e719c90689fd0013f0bf90b4b3134796c92b4e389763ad5556b79ca8b592e"},"length":15149050,"released":"2020-04-02T14:12:42.21067782+08:00","url":"/cluster-v0.0.9-darwin-amd64.tar.gz","yanked":false},"v0.3.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"dcd704397e9c5d683392455056ccb74a35542796f782b04e340892f782e834ad","sha512":"c6a67e32e4eee1fe00f3fb996a76658a059d97a92feb38d9946a1712769784dfb1b5779595336c328f41e4ea0b04078055651b14904b78ee52870e05ab2d3eca"},"length":15163528,"released":"2020-04-02T19:31:06.00598902+08:00","url":"/cluster-v0.3.1-darwin-amd64.tar.gz","yanked":false},"v0.3.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"bfb3aeb2949e934932933339d6363faa95d1d8bbce484682b6a89b85a79ed7d3","sha512":"814b1b553260ed036babf3f6bb98d025fe2a33ebbb61ce86e2f1a194a22fd11fcdf820b3c2c91d12b4e59a39441d6bf82779e5f37b76eb0aa4fab638dd57fdfa"},"length":15167801,"released":"2020-04-02T20:11:22.911015511+08:00","url":"/cluster-v0.3.2-darwin-amd64.tar.gz","yanked":false},"v0.3.3":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"cf14a7c3c83f31c2e3888c40f3ac63b5bda17c968b573908ec562be7dedd87c4","sha512":"cf71291486e3ba6c9bd1dd06fa161f65454f63890b1ec44f250c71d0d9a70274d96ae290890e45fe01aafd390dfeb7d5301e15221d62b4fef0c252414095ab69"},"length":15167794,"released":"2020-04-02T20:33:00.918662045+08:00","url":"/cluster-v0.3.3-darwin-amd64.tar.gz","yanked":false},"v0.3.4":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"de22245cb6c0f2a713ce495d0cf03f323a6fcdba916c1d8741108187757408ec","sha512":"1dab8098c70b32d498d728383c2ca0188c8c5d8fda6fc054e83c04773cf33c87
33356843df35350526a4a9e122b29e9e274e8bd3126a5e31d69d02bf26906afc"},"length":15172903,"released":"2020-04-03T13:03:40+08:00","url":"/cluster-v0.3.4-darwin-amd64.tar.gz","yanked":false},"v0.3.5":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"2664d1de25b666ed345db6e566d320d230eefb51294e3f1347d3d19b04a1b943","sha512":"c92aece62e2e80e28fdcdb1a1283a915cd50468245d077193ef394752687232163d625df12876d02edab218f5707979b9e8cea89baf3ec03bdfd55777d531e2f"},"length":15174238,"released":"2020-04-03T14:38:47+08:00","url":"/cluster-v0.3.5-darwin-amd64.tar.gz","yanked":false},"v0.4.0":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"8685413a5563800176465d8380f4608796dc38fe021198847f325d765bf40056","sha512":"4b5e755984b5e3b89fc6496727598d319016c356f1ee981cf75e46590ca98742a3040e03a11c30612346b7089492490d101b68c51e334b98411f955b7c677c40"},"length":15187390,"released":"2020-04-03T18:00:48+08:00","url":"/cluster-v0.4.0-darwin-amd64.tar.gz","yanked":false},"v0.4.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"8e38bafea72023bce8a2ba0550f787de524380d82c6f36eb6c832c9aec1989a4","sha512":"382274032a5c4608a45e2461fb94d984c6e4d31d579e893b8144bbd68b2f33f63184344cb294038e8faefd4c19838b339127d0bad13c7c47b689bc3c3d200270"},"length":15186802,"released":"2020-04-03T20:50:30+08:00","url":"/cluster-v0.4.1-darwin-amd64.tar.gz","yanked":false},"v0.4.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"69851ffac7ce848fc83508851e4d3265705089b9221360bf5e17d5d13fa426e8","sha512":"6c6d3f5aac5ef2787e7de56c5c3595573665d633799bfb62d006db9011455b63d38cc5793e290c1caacd65e2dc4f88ea227a2100d5bf6031a0dbe0ddd7551d5d"},"length":15186291,"released":"2020-04-03T21:36:50+08:00","url":"/cluster-v0.4.2-darwin-amd64.tar.gz","yanked":false},"v0.4.3":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"2b56f503588a5942625684a0f87ad6fb1c72fcb4e42574580361091b574f55bb","sha512":"e40020fa29d07ca8c4444cdac61e865696d8d917b80e197aae3c1871bd943c5672cca3993d7de8da4e40b369b70dfcd3f38c59a1
c6460a3126be4fc332989ee2"},"length":15186440,"released":"2020-04-03T22:27:15+08:00","url":"/cluster-v0.4.3-darwin-amd64.tar.gz","yanked":false},"v0.4.4":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"9608cec3df1f2a266401442dad63d1043612fd2619308099affca182267c387b","sha512":"ade00b89dcd8a84a26c5cf5b4ee5d1b3e4b4c61f7998b0a2db722f3825818394458dbc948fc83e043b5d872f8e6616d73855594f277aa2f89bf38f4591886870"},"length":15475998,"released":"2020-04-07T10:07:58+08:00","url":"/cluster-v0.4.4-darwin-amd64.tar.gz","yanked":false},"v0.4.5":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"3410f4d00f2b75830aa24d4081dbb75a77aafff20092750bd57674241a0e3290","sha512":"07d92b474dca1961671b6d56de26f4171a1fa06182d996c1339f7934ccfd1ddc98311c727d5a6ad81ee5fe9493307e3cad4fc4a1ee9d08ff0a2ac6e2ff805ed2"},"length":15492622,"released":"2020-04-08T10:15:35+08:00","url":"/cluster-v0.4.5-darwin-amd64.tar.gz","yanked":false},"v0.4.6":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"e05f58072f3335c5e5b3d875ac7ea63833467a9fd342f9b90c08a3151ae6f35b","sha512":"ba2c9bad115268191bb314160c9a6ce294e231ede943fe4ae9db4c679b2b12e7a36f6d68d8994d2da8931137ebae880d0c7ace9a1d1be46e5c22b3dd7788fd7e"},"length":15488762,"released":"2020-04-09T07:57:33+08:00","url":"/cluster-v0.4.6-darwin-amd64.tar.gz","yanked":false},"v0.4.7":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"b1c0348aca75c2e66537cb42296b34d5fa31cc7d09140bab78d36705c8d2cbc3","sha512":"523888d3715a09ad962b0d6b43d46c5e321697c34db04782c6b5462ccfa1fc6d5dbb8c5ae18cd5ebe3d3a9565beaa639e45ebf49a65e650053f08aa0e9c13e4c"},"length":15527059,"released":"2020-04-11T06:57:10+08:00","url":"/cluster-v0.4.7-darwin-amd64.tar.gz","yanked":false},"v0.4.8":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"2e234f399cddd1cfed12bef0ab8a17247c42b0ad3e2ae22bbebef4f79648c032","sha512":"65fecc0d0d79bdea73e1a79e179dd628803980c1a85a7b6880ee868db1c99944db899719f1511bfe63746a52dbb25cfb1a8cc19772c16fd26d6e49b8f09a90ef"},"length":1569
7993,"released":"2020-04-13T11:49:53+08:00","url":"/cluster-v0.4.8-darwin-amd64.tar.gz","yanked":false},"v0.4.9":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"3cc6c908f376fc5ce783d01279aaaef8e7cd72b0a8732d69e190bf460cd775ca","sha512":"ad8507a86d852d819621d6e7a938c63f5f3aee65febd84aa6352de0f0ad875d3086fcc88438ae07b006086c456e3dc584f28d2a0a4a870e7e3eec880742ee0fe"},"length":16085699,"released":"2020-04-16T11:14:30+08:00","url":"/cluster-v0.4.9-darwin-amd64.tar.gz","yanked":false},"v0.5.0":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"8ff08c51a627c46a5e7a253535b8e9016d233f240642a6d5b10af5c00bd316ec","sha512":"752a3a177ce937d96306d51259e37ee9252d3c8b11dbf3915370f0af3977d156867ce53985a548640ac604a0081c68c70e0ad70576918ccf62b3dd94d1e9bb9e"},"length":16251846,"released":"2020-04-24T18:22:07+08:00","url":"/cluster-v0.5.0-darwin-amd64.tar.gz","yanked":false},"v0.6.0":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"7d84616a5484e74f1afb3e8c19fea4a816a3112690d609bc9b03b16d11268669","sha512":"3452d97a047f72a29a915faaa5ba6cc0a16ecc9b54986675f928b091b186d9fdf4c912b118eb8c64a7ec83f0ac646675fd9ca93de9e2281f0461a1432bd15de4"},"length":16298882,"released":"2020-04-28T21:44:23+08:00","url":"/cluster-v0.6.0-darwin-amd64.tar.gz","yanked":false},"v0.6.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"8afe66d87226b5683f08247f19ccff70cc39f88ba3cd38f2573f25e1d3549009","sha512":"9d67fcd1ddcaf39c55fbc4822c9e2fc3f6d64b211cd89205efb94b67c5aaa9e888086a9b0c6a33c566adb86e693a8483cfad27be57caa5940c04db861be56c06"},"length":16663820,"released":"2020-05-15T17:41:18+08:00","url":"/cluster-v0.6.1-darwin-amd64.tar.gz","yanked":false},"v0.6.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"c6b9f8f12002248f2fe3312f8433185ab30ddb8741f8bcf361036af197a8bddf","sha512":"349b38086c33e0210ede25d2673b35d0c86cc54b172dd5019231e5a7dbe48eea0a0b9d2cd327ccaba7eaa6faa140f190e38525edbef68a0b85fc27dcf7187b18"},"length":16820923,"released":"2020-05-20T10:11:42+08:
00","url":"/cluster-v0.6.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.0.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"f6ce67b161241e575901a8f9691a1a1e9d13ad3d3f5b94b5a11ab9b3dbec53d9","sha512":"0fb14701de07fe15b5c01222365a49119083eaec1913683de81a8cbb0e404e2e66070fef033897beead17ddef03e4a608ce8631e5738dcc24d8217dfaca42b27"},"length":16666182,"released":"2020-03-30T14:55:12.015501614+08:00","url":"/cluster-v0.0.1-linux-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"3f2def2e924a6be5e4c45b0c620ce6b8a6106fbf77893829e398abe2b945bfe2","sha512":"c388c1579432ef55c017aa3fff670f4962f4cd5147b5abe01f2d258cbeadfb4d02190c83991c0840344fa43d78de7c88b1c53309891e64bc578788c182f927da"},"length":16675593,"released":"2020-03-30T16:41:17.555789598+08:00","url":"/cluster-v0.0.2-linux-amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"01a69fadd262c75d9621bdfbe3c802a65408d1b50ef6ae8f5942b456c1df4b63","sha512":"e9e22b3a6c712bc08f76314321350cc6e30ea74fe2e3170366fa8d13edffba88284908d91c4fe0fdf6e4ed9ffa9c0baa9a82529d99c69e99efd0388bb3bc092f"},"length":17046897,"released":"2020-03-31T09:27:10.617959661+08:00","url":"/cluster-v0.0.3-linux-amd64.tar.gz","yanked":false},"v0.0.4":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"28e7b4bd20f6d462ec501ea390ba65a79fa941e3168b6eaf76bf2579140a9939","sha512":"8ceeeb2b8aeef0318e1457321734e0ff2c6190bcf93bb3838b88ff9170dfe9287e1c7670c3c85fa2539dd55cdd0a220ce0385e6833c395d5d372abffc415f65e"},"length":17049886,"released":"2020-03-31T10:18:27.520655637+08:00","url":"/cluster-v0.0.4-linux-amd64.tar.gz","yanked":false},"v0.0.5":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"a3b664807d59758cf58e34c92713038e76a1ce4c6abf8900d1ab0796aaa78e43","sha512":"565d80793a372d017e64b3ba43fcb37b62b89c7106195a14437200b10e5a163ae310eb02981b4c1a8fe7195e5c8342baa1ecd13e5a0282d239433a08be044e9c"},"length":17089741,"released":"2020-03-31T
13:29:51.535573274+08:00","url":"/cluster-v0.0.5-linux-amd64.tar.gz","yanked":false},"v0.0.6":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"7a7b3094f3f7f5bfc105cd2f8d781881b0497ca704d646969128e72e7b27e268","sha512":"9d20f73b598f48fbb4ecd9c8ff4623f7b8125d98754c85395498c0abd60e3a02b3e05046a76d404ac06f3ad0231b305613e551996c0f91a2ab06864c3b0bd575"},"length":17081558,"released":"2020-03-31T15:37:41.540288658+08:00","url":"/cluster-v0.0.6-linux-amd64.tar.gz","yanked":false},"v0.0.7":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"38ff9327ed0800adeea6fe7306d64c316c2c5f43f297e474cd06eba8d0c7717e","sha512":"6e5357dc8ba270cdbfb6dba9fcefad02d8d8c8ed065c66851fdf2c28e282016ae480d9f0b152ac0586eb3f9680a186638b43339a8c405dfb7f153de109a9ebe3"},"length":17189082,"released":"2020-04-01T10:28:45.653773416+08:00","url":"/cluster-v0.0.7-linux-amd64.tar.gz","yanked":false},"v0.0.8":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"eccae4424f72d718a876e96e41cbe5e891b720393a2858b18622e59086f4e5f3","sha512":"a7b3f0314f4a308fb344f5a5c69f33bfcca4130be35208eafbd5f4d32df9a3bc8dbd75d3580b8a15aca5d37084b957db125770137f5d9402c145510dabe90002"},"length":17333681,"released":"2020-04-02T10:00:12.993695734+08:00","url":"/cluster-v0.0.8-linux-amd64.tar.gz","yanked":false},"v0.0.9":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"aae8272145b6bb787fa20443ab85da77116d2057887f3159b574f2c0a4c691b8","sha512":"4bda152dd8acbc9580d65045b028a35cd82a9e1913e86d85e67933e0d1395176eb0c09d4089a324215d62210183dab9e7d2b8b4cdc151cf523eba56f252d95bf"},"length":17359475,"released":"2020-04-02T14:12:42.21067782+08:00","url":"/cluster-v0.0.9-linux-amd64.tar.gz","yanked":false},"v0.3.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"b7a3f80ccc45142279d87130a1d7e2bb5bbdacd1fb8736c743c1a299f81e6a66","sha512":"dfb231567008649fc274f0ccfa62eb81d72030607a8128a3a179dd096dc7e81acdf98d19c4b8ec96b3767ece006e22eca3f1af6a061bc1a25040f47cff07da96"},"length":17375719,"released":"2020-04
-02T19:31:06.00598902+08:00","url":"/cluster-v0.3.1-linux-amd64.tar.gz","yanked":false},"v0.3.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"7f6f6bf762be14efcea43c7ba3528c75f743f737d1e6fe022d2a58536880d35b","sha512":"33efb6d24bd6833a64d2a40f1243c28b797174fd17144373616c506e0f4ea4a4c47f7e8a43007500c1e139448a6f789dabdc7afdc6a4001f5e5f03fe93249f05"},"length":17373986,"released":"2020-04-02T20:11:22.911015511+08:00","url":"/cluster-v0.3.2-linux-amd64.tar.gz","yanked":false},"v0.3.3":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"e0b6d3b83790acdff90eb4cad2a1ebc5170123cb890fc9d566d3a8f8b56ccba4","sha512":"89ca185a20d4748ff73d1475baefc5a8c6c762d5f5863fccbaedf3becfe0a2631a2a2344380f21ff948acbf33bd605cffa573b6bd5308f814879524a1beb1f14"},"length":17373991,"released":"2020-04-02T20:33:00.918662045+08:00","url":"/cluster-v0.3.3-linux-amd64.tar.gz","yanked":false},"v0.3.4":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"e588b2dd0166b0e9e7f2bebc6d3f602fe9c7d36d37093e7503d933f739e3d07e","sha512":"f5f7c82fe5cf96687862044923327a115d1486dfe6a6ece85c413dd54410712eaad685f78562d822802fa746857f47d0974df52f9f5dac4ef174bd56f1c999ef"},"length":17381627,"released":"2020-04-03T13:03:40+08:00","url":"/cluster-v0.3.4-linux-amd64.tar.gz","yanked":false},"v0.3.5":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"d178a3cb3a529dc76276715ee699ae86269790a6204801d5d1976f3ab3f79a03","sha512":"ae0b363f408c5ed8712b87af2429d5abe0667a2b7d8a995b32d2c40e19c6cb9c46679e217112ca63336ebf89eff59a677007e6e4ac58d4bb77315e36478a265e"},"length":17382177,"released":"2020-04-03T14:38:47+08:00","url":"/cluster-v0.3.5-linux-amd64.tar.gz","yanked":false},"v0.4.0":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"b9ef35223db080397e60a1965ffd05d3f3c0cefb77bacee2411cf51f8ce06a91","sha512":"7d889903453fc8a0573c14554a6b803fafa202cf2e6992ac35e325f4714d13bda99199df163377e6b5106c6d92bd173a5ddef3fdeb013fefa6a6434f3c5fb2db"},"length":17406256,"released":"2020-04-03T18:00:48+08:
00","url":"/cluster-v0.4.0-linux-amd64.tar.gz","yanked":false},"v0.4.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"76b661830904cabdf98c9e2c40f62ec887a927a2727e9d2fedfeb02e72e49360","sha512":"3cecd45cd24995ff633a9ebba2cf5aae8ec4a448366ab4976e847d23e69042cd0754cdfc52e2230933f2c0f87a48289a9792c94ea4afc53339401bacbd8d6a65"},"length":17405113,"released":"2020-04-03T20:50:30+08:00","url":"/cluster-v0.4.1-linux-amd64.tar.gz","yanked":false},"v0.4.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"b85325af4817e15c42aa1ef8e6fed9ba7848991ca52e30124e894534ff9eadd8","sha512":"4ec3019915937b99cb45b6a3622b13357f88ce0bd407b0de9e3c77c99748ccb91d6ddc8bf906684c31ff5ccdb3e9c9939d797efdec71bf99e6bf0b552f4434ee"},"length":17397543,"released":"2020-04-03T21:36:50+08:00","url":"/cluster-v0.4.2-linux-amd64.tar.gz","yanked":false},"v0.4.3":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"b4f631022c797a24a62a6beb9bba07aa210f24df97c26b9b995afef8ee716879","sha512":"2ba7445270cf7c1b3766466c7c8d337b6e13739ded5b70d4455f68d207ef3cce09457aa127fbad034320ea002c6a58d7ecaf2142d3322dc769cf6f5be2987e99"},"length":17400435,"released":"2020-04-03T22:27:15+08:00","url":"/cluster-v0.4.3-linux-amd64.tar.gz","yanked":false},"v0.4.4":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"54ce6818d068dd73eac498133c89897c4156b5952efdba09b400bc2ccb6f29c1","sha512":"c3c68b06ddc457679cac0a51ebed743916a9214e99502e95f0bbdf0f5bb9695e2b37f4371b1992f67314bbb0f4f62d275e2c3c1f504fc8581a364d8bcced200b"},"length":17732501,"released":"2020-04-07T10:07:58+08:00","url":"/cluster-v0.4.4-linux-amd64.tar.gz","yanked":false},"v0.4.5":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"c5b5921542a8abaa71e6db267b643661427b188e833cf4bb71d9900440b2eba3","sha512":"237684f8de953b735cce8c7eabcf5ba15fb7a3e435311a17384ab5c520e431c07cd65d955d6756844d7d4b7bd93c9962a988bd3905ace7c82f1a98032ee87cc0"},"length":17749331,"released":"2020-04-08T10:15:35+08:00","url":"/cluster-v0.4.5-linux-amd64.tar.gz
","yanked":false},"v0.4.6":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"85220c3edb365878e8c9b63fa349d9f4eba1aefb7fff65830898590f1ef3513f","sha512":"5429195dbff9f1a70f643d47b0cbae0c418ec080ffcf693380bd2df1d0d886567da2adbd0d306be2dff896db15f7e0e227e916cef8f383d4a3f9b6629a1dec3d"},"length":17748098,"released":"2020-04-09T07:57:33+08:00","url":"/cluster-v0.4.6-linux-amd64.tar.gz","yanked":false},"v0.4.7":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"36fa3de9f8e93f2f5704bc1b5a591451ef6b702624336133631f24cbc29b5be0","sha512":"a7227177ba8d896f60563eebcfcb5039e4ee924731f204c8db3b94ffa485fae9b4caa0ad7958ac18dc7207e79bb454dc5edfe4d6ecd5597043db2b8e07a03b7e"},"length":17810260,"released":"2020-04-11T06:57:10+08:00","url":"/cluster-v0.4.7-linux-amd64.tar.gz","yanked":false},"v0.4.8":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"9c795e1ff4ded7ec3fe6e3e1b28c1dd953308e18ed869f4be5b24beb64d4d5ee","sha512":"e05ae2f06eea795f4e33f3afaf5e11b644e5934f9fbff99e60c3c7728ea62cab669be3418e749aa8846726b7f957790b8c0a5aff2b4f62fac255dd6d1cb2cfc1"},"length":17976417,"released":"2020-04-13T11:49:53+08:00","url":"/cluster-v0.4.8-linux-amd64.tar.gz","yanked":false},"v0.4.9":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"2e5022d12606f5c84874bc1bfb15bb3a5fb33d55704f85c6847a2fcbd75f5401","sha512":"8f34de0cc2bf05a96b7ae06a3acba3789b01fbd8fa746f08071dd1f7cd907481909b4b6708bf226cf0b68eae7c4de23fc8a43f6f35c2fc201574ae7b2e166038"},"length":18409831,"released":"2020-04-16T11:14:30+08:00","url":"/cluster-v0.4.9-linux-amd64.tar.gz","yanked":false},"v0.5.0":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"f92f6f96052f57791124e887e7d87c789142d6ea3e764033748e895d4f6aab24","sha512":"559ff65fa547b3cf3defb179037577f29216b982280dee195c80f881dbb6de22b6879e6b650affeadf5c2cdf86da28ebea22393d6b92dd46f4cde3125e5f1baf"},"length":18626321,"released":"2020-04-24T18:22:07+08:00","url":"/cluster-v0.5.0-linux-amd64.tar.gz","yanked":false},"v0.6.0":{"dependencies":nu
ll,"entry":"cluster","hashes":{"sha256":"288e2fe881a2bd74caa79cd12a61cbca393aad4bda0d30977b95ed63317280c7","sha512":"cd7af4dc0639c1f4a2e71b8fd64ea9ade91406eef550c9ad4923f1ba84c445ae43057c7fcfd14eaa6ddeebd33c42a110da12db9d4dda9f695eb51502b1c98ebf"},"length":9308558,"released":"2020-04-28T21:44:23+08:00","url":"/cluster-v0.6.0-linux-amd64.tar.gz","yanked":false},"v0.6.1":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"03f04c3f611e501d1786bda4a9f15194d6e661f26cc7c1440516c6ef2c1cea01","sha512":"0775453755e614fb8575ebb3498f2fc8594323b2e8783862f4724c19b04f5b5a247824fa160fd70f4c4030a7964910a8f1c3a7b5b058fc87d776f97533cd568a"},"length":19097785,"released":"2020-05-15T17:41:18+08:00","url":"/cluster-v0.6.1-linux-amd64.tar.gz","yanked":false},"v0.6.2":{"dependencies":null,"entry":"cluster","hashes":{"sha256":"6c080236ff7ae5ca0df1de8351d7a4959949192b1c4a1a44a63b80a6f412fb1d","sha512":"8da32f36ef90c76469c740dce1c6bc42821f36a4d87de3a6b45b0da3fdbfa862c0c3c8089492fbe212be65795222a63cba67d46cb0929b98e3c0d943a570e46e"},"length":19264126,"released":"2020-05-20T10:11:42+08:00","url":"/cluster-v0.6.2-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.ctl.json000066400000000000000000000161241505422223000232000ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"Xb9SkkounUD5DD6LttalTqZBCLp4aUmoNr8UONNTTki5LmjFug+5rgpKCHOfF7NtfwPdNRpe/2lG/JRtao2AXPVV2JSZ4bnf8eF33WFdsiTGZFQQVnS0GF9RI1U3FFCcEIywGcKykIkAJGf1RAAQXb1BxocHaOklrAxmECFMbNxfb3pNpsDk5xAfz7reZz00ftQYC6xMh17lwAMcyMT4tTGANWBPXm2VkQeAONnecLZheN4efPMQ1WEnf7yE8vZTs1os/QB+Ak8S+safz0W5zdeEEJIrx30PQ2CJkZ8WWtM68cmsJPzvMUDqa6SB1hhDMVfEKTNDt4bSNqJriwfo0w=="}],"signed":{"_type":"component","description":"","expires":"2031-10-21T08:19:55+08:00","id":"ctl","name":"ctl","platforms":{"darwin/amd64":{"v3.0.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"cd460aa4140dabd9f38f4ecc7d0a7944d
1a0e5ca5b0cff45f9516b2cd7301576","sha512":"bad30f71afe35422d24c0cdbe545592915c906ab1a64cf768e54271d831e6e65ccefb906dc31ad2ddc36207080238c06ab41a4a2542159bedc50aef39b2f70e5"},"length":47160379,"released":"2020-04-27T19:39:08+08:00","url":"/ctl-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"60b3be26637a6a6880c3496a196b33d2a1f4aec231b5241c9241e30fc06c4858","sha512":"b636bd047178e520c76836c56d85eef8d389568426da1c0715cdd05305cc6187d79d159b550804ef5ba784d37e5822b08372f0c0722dd3b37fb3781c32071985"},"length":35943101,"released":"2020-04-26T17:25:49+08:00","url":"/ctl-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"15216c9557cfd36d6d83cbfb6c706f70e8dd8b5f0335cec336246b5e144a367f","sha512":"5934421881993db9dfa37e950069f57ded69f5cad66fb4f28113db8574872ff85dc794cd93a73844bd219db96c5e04ddcdab8ec912ffc750adf4e2b13edb173f"},"length":46679092,"released":"2020-05-09T21:12:23+08:00","url":"/ctl-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"f491dcbc7c26c02d874b70e177d08cd472434ebdbd35a4f51bd482cbf81a0b07","sha512":"4a91adf358f5bf01f2c1e54723b0fa0690ab9a87599047eaa891ecbe7be8ac412e9ca7c976d11296284a9a68fe1beccf92e81e6ee8efab531398963532dbc2f3"},"length":37516422,"released":"2020-04-30T21:03:02+08:00","url":"/ctl-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"923875b86df443c060f06790e96025afd60aaf7037b573bafc9dd57fd886dd9c","sha512":"895523951dddfd1175f736d7fe4d206e56f59cacddea0f5a30f83e7a7f518d70f7558dad186657cffb8a3b707ea9bda6a7f4b956d9fb28a23bb5ad357a956427"},"length":51597246,"released":"2020-04-29T01:04:46+08:00","url":"/ctl-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"fd3a8b585d9c7350ff97edc958355bfaaca256f0b0335b59b1707900f069d70d","sha512":"e35d4f3bbc17f2a642
ef99fda820345299d4b96afd5e5dcc95b2316b90cf7f64424f9123ba68e550a411dd3c91c118dd6ef923f61c27ae64a423c74a0b7e9d77"},"length":49289307,"released":"2020-05-15T21:55:32+08:00","url":"/ctl-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"3848ad2a4a1f4f335298bbbf2d9c9dcdbdb1504ce3f4b1cf93d3a8872dae4720","sha512":"0d7f056f07479c4c133ce49e89a113b7030c904f2be95c1d63babaa2c0dc6a90fc4a4f1ad26264c39176725d353100ce72a05bdf64dd58472fab480c08d7e4eb"},"length":102474410,"released":"2020-04-27T19:39:08+08:00","url":"/ctl-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"634251b107ba1764c4afb2c1025e6b35204bb27a151b2e2476d33125a2e1224d","sha512":"5561b68ed609be76028f888ee116867f0aec0600777c03268d1cac26d240d6f3018582146408f9bf83f22e6fc6734f98851562d24148b4c600b14e90eb8b4d85"},"length":105257684,"released":"2020-04-26T17:25:49+08:00","url":"/ctl-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"305cbf3676e1eba8e476b399263ac3d683b60c688b8b2e6133b38e40e89109c1","sha512":"b11277ba145ccf28ab6113ba3026c48d94ea7eee2bed8c071d08e6c8a1236c9b758755a29499da5a8b9750aee385e13c6b46e0ba127bbd9e4ca75c9698989f56"},"length":105102419,"released":"2020-05-09T21:12:23+08:00","url":"/ctl-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"3390822572b565bc639642e3899dc1be182ba6f398ad0facb55e2935377a8ec3","sha512":"0ce409a4464a06ce64b0ad239a3405358bb5028cc104cf64592a3a2dc7ede7ab50cca431aa01624d5068f6ae570a736c3cd3cfdbc55aa9404620c02d69092c45"},"length":113568084,"released":"2020-04-30T21:03:02+08:00","url":"/ctl-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"e1572252bb48e0a474c87d7d6aa508a1ad52fdf9629da32ab7d9fac8d96733e3","sha512":"8c31efd39d13368cb140cc3e61c48666731085d62bdc9547026ea73ab65e5286ef2a
9061fca0d490af91152edbb2f5e5b4a4dec53cbc93dafd285961d2831a18"},"length":133977297,"released":"2020-04-29T01:04:46+08:00","url":"/ctl-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"018684a4c7ed1f92adb10ea6da6bd756fcf7f4e3483017ac4d5c681b3b216c67","sha512":"8672bc868d589678de0f4f03b2b3221145ebe82dbea6a97ee160523e0bfdcfeeab54ed1001abc6d398300764455eb6b177f0d5ffb3aca0a4a0fe999ca237d4ae"},"length":131633333,"released":"2020-05-15T21:55:32+08:00","url":"/ctl-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"eafcaba74278b69c12e7b4a86e3b2610eece5bd41a15a741cae75cb013bd494a","sha512":"97779fbdc1126b2598f8c1b40f58513b6b8e43c59cbf5c80bf9942e334823dba815f5aeb809f654a99975cefa6780b085e3bc149c6c95bd1d6f35acb18a86b92"},"length":100184780,"released":"2020-04-27T19:39:08+08:00","url":"/ctl-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"d21caf15a835f9a14890c962055b5031eff1e938d433226bef433acfe94ccd1a","sha512":"c0c6926191f7ec13e73b420337562c5ded9e4e904ea0db056e9836e5550d79ded01c38c60e66995267de5b5db3999cdca5b814961665768a242e59962bf4c228"},"length":101890152,"released":"2020-05-09T21:12:23+08:00","url":"/ctl-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"3fcfc0cd85ebfae2b5ccdcc3a7649d9ef8de199a812e29627bbb31030fccfa38","sha512":"48e7c5d4acc6267aad3be1eaa1a9b90b0bfb05d38c3fcb6a228b1dea4d99efbdb5f98383617cc23c6790dec2b6c86aacbc67175eabb17d1e385207aa4e2b8bff"},"length":110373198,"released":"2020-04-30T21:03:02+08:00","url":"/ctl-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"161d30549f44b9c8c2d59b38cf01745e215eb2fb3fdfdf7d4a1386fee61fea16","sha512":"80562a6b685f75e2a9ee99054465ce1e0791a09f9e43793490e7ddf4f865f8cd4ee0e7e6007018d39119582baa24f9a15c3971fa00e4c5
7ddf2087e8b076a640"},"length":130379569,"released":"2020-04-29T01:04:46+08:00","url":"/ctl-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"ctl","hashes":{"sha256":"795017952cafb824f0f7f8acf3b598ba1126970d7ac01cc655a1178b578be3bc","sha512":"794e491aa7ab29ae07955a471e2ce463829539649a2156a4712f786daf859af2318e20e4f606fe0f18cc3c20b9228291ce71cb243380067724b99c9a426986ae"},"length":128227076,"released":"2020-05-15T21:55:32+08:00","url":"/ctl-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.doc.json000066400000000000000000000056211505422223000231630ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"QKzYKp8EeDi9ZyWquATuk2TU38Ezov2Vg3FF8Nj3wU1YvbAELMHu0wio2S3ZYfGNe32jti7yb3a5ktk5IVGbTpungT2EtrtfQ8Zvtv80PDCpqqDpn5pDA9uwKSq02mHMxmDFeLSY7o9nN8jn91nxWqsnTF/HDjDY5y7zMHTy2lcizeYIhYLY7DkZIeH46ESLcwoXWtQuftnP0NOwMoSaFPMG9dpwjwigqV1lweaz+4yaVdF0E4EKj2oQHDATcrwqvFqpRNC3m0GGsnfp4yNG8gzfwNR5B8HwvzpiSIy4ypY3Anqq5XXFKKa7WS9aSb6tTHIJkh+ujQD8Egl4YRRq7w=="}],"signed":{"_type":"component","description":"Online document for 
TiDB","expires":"2031-10-21T08:19:55+08:00","id":"doc","name":"doc","platforms":{"darwin/amd64":{"v0.0.1":{"dependencies":null,"entry":"doc","hashes":{"sha256":"1769642055dd8e4945b7ce2cd409b99193a202c92b8d1437bab86d3b08c077a8","sha512":"8e676496fab7f16577f29c78e9f56fd6cefbc427b6d4c68dc59959994fcadbb739bc491542887809a625a5ee2960f25c7d405455d35e720d819707999ef70b8f"},"length":481889290,"released":"2020-04-16T21:44:48+08:00","url":"/doc-v0.0.1-darwin-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"doc","hashes":{"sha256":"6c774110df78071ede932e56707e18032272cd8cf35655781de87f3c902163b1","sha512":"2c58540fd75ddab2c13cfd87eaf684923276f1bc936aa735677208fb5f8d9eefbc044a5ac8b0f2c73544db076461f321abf035b2d7f6bf4b4d61dfa24df68175"},"length":2349202,"released":"2020-04-16T21:57:35+08:00","url":"/doc-v0.0.2-darwin-amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"doc","hashes":{"sha256":"f78716462d3db7b1df3e1a8ede94fd014f6eef9a0eb4432f116a3051634f6283","sha512":"274728501ef1b32e05a67dcac843cec306ff256c5a740ae178f2cc11457cc47743ed80786261c3e2eea341185a8220bfb6249246fc5bb3bcf45448ae4cb7b780"},"length":2349206,"released":"2020-04-16T21:57:54+08:00","url":"/doc-v0.0.3-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.0.1":{"dependencies":null,"entry":"doc","hashes":{"sha256":"8ca58dfe89e8dd54456150a091608fcb4f9df3f5b703113e34bbb5b37babbfba","sha512":"07932891fcaeb6bc4daa78b05901c96487741a7cfcf31225721b224fa6346fb1eda954bb84c0f417397624ec794876464a8c757f54f74f1327bd62a73afe520a"},"length":482033553,"released":"2020-04-16T21:44:48+08:00","url":"/doc-v0.0.1-linux-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"doc","hashes":{"sha256":"c57dbef251699746190711ea6daeb0ab1d7289a18907292f104b0ffab5f0be78","sha512":"e68b85604116bf9cd3b53c1c2cce9ad95acab1aec0e39a979f963bd04a9b4efb258237e173fbf2316b74dbd22980ea3d73afac52b2542d976da0ad2e57a6f000"},"length":2366028,"released":"2020-04-16T21:57:35+08:00","url":"/doc-v0.0.2-linux-
amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"doc","hashes":{"sha256":"0cbee53d70db642590fbe8ece05ae6f9405e27e973647514479b7ceed5bcd5af","sha512":"e24a97500e899195a4f72375d5a44d820d20e9b5f1553c4d044262821c3e1004e85a496ceaf81bcec4d42b374dbe2422d0024fa9278415361deeb94925f21c58"},"length":2366035,"released":"2020-04-16T21:57:54+08:00","url":"/doc-v0.0.3-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.drainer.json000066400000000000000000000477751505422223000240620ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"mItHx5Havk2yiqSLtur94Y/MrCiv/qOMIlo2G8u2kSzsdLNkaj5g7df80tFPJ9zL3o/ocE/mjUMBBEddSrHbKBf8iHmLd2sR2O9TAotUFaUPKVuRnQSm4sUHQ5V5pwqA1ZtzvDK4XKmkdc28WVAJ6TZCfA5MjYJM5gEeFAFeZI/OBZ4Wx9uETUpd/wF08X0fPr7yC5ePg7ZyZYRMlUBZFP9gfDoaSHjiDnh0W/yJ5n58tfwzhMkRe0+832Xf3n3W4PXz9yA6oQLf8CZFUF9jCKXd1+LCiRN2pb3wKqXpQxJXVQOdTcxhX7G7AGj16GIF6dbVL1iIZEAjHV+VA1wybw=="}],"signed":{"_type":"component","description":"The drainer componet of TiDB binlog 
service","expires":"2031-10-21T08:19:55+08:00","id":"drainer","name":"drainer","platforms":{"darwin/amd64":{"v3.0":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"8b16f564e10211779ffce138842a391bb83db1620f5d6aee327211cf0a588b28","sha512":"c58b770b5b601c932f3cad50b87af5bf3c0a5b94f5058b669de2bde36de1f6b435ed0b722a8c4e9b8a611e382b264571eca30faf22ceb03f81cc5f0578eb28e2"},"length":19852080,"released":"2020-04-16T16:58:49+08:00","url":"/drainer-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"78e8e4ff56fa8d95964799aa33402d321d2d419704c12f74febf97fbdb7e5c61","sha512":"3523bf251f6a23134f2aaf04fd0c2b3c30a9952de78862f0bd0ac8ca6591cfd89e955953fe8d7ffc8fe962053ab9f39ae94e6a99b6aa97fde9b7e510e03c43d9"},"length":17756607,"released":"2020-04-27T19:38:56+08:00","url":"/drainer-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"ff1d40c1a471b2d314782629f70625e72fcd4e099a66322723037ff23db87d98","sha512":"9514cdd3e82152ed2a5d6df21dac2666b5288b9bf90ca5d2d63accccdadf5f9e367e085330789b6b9e1812a3bec18ca7678770fac4b808481323f8534504d833"},"length":18661110,"released":"2020-04-17T01:09:52+08:00","url":"/drainer-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"43249c5b36c05bc25eb6100dd068f1f0c371269d3ead9cc0aa9ce8e138cf43c5","sha512":"468183132842a8dcadb08bff880eedf1b00fdc50423dc0f3a8bb27f5f91b3a95200ebf97edfdd3c5a1feb1a0624f4cc3828703edd2c711aca4dd446084116486"},"length":18659948,"released":"2020-04-17T01:16:36+08:00","url":"/drainer-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"2611886075000f55836ad9c4d71f30951574e9f6acd9f0aa201d10f709e1ef5d","sha512":"b30c18de7ea99824a47a9cdb01f109b905e5e32f3b5f318f8609092b32febfb0226a524d31e9b6b21aa7fc86c43635a31a1a16bfe4ba31a71680ae63b69efd61"},"length":18665307,"released":"2020-05-09T21:12:0
7+08:00","url":"/drainer-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"3d9125e366a1ea6731cccaa2d50e9916b2d12e926482ec42bf6185eb3a596acc","sha512":"76b8c39d5c4a4e2fbee2ca2e3b097fc36f9fa5006429f4bfaed7642232aec37fa854bccf0b89cf2de8c27ffb48d1fce81de6a20feba5f6293f7e376e221275fb"},"length":18342735,"released":"2020-04-16T23:55:46+08:00","url":"/drainer-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"4809145f52869c41738e5e5d24d0e50c10c18d830332c764ad37e47fadc49aa6","sha512":"de0e0c265822c9403b2531f4df59bd8fa1548cd60add0f943e9b4e36060fd9532e8e5e1b1d55a781a6255b484e9469a1096a5832674771ab3ba10150594d732c"},"length":18342755,"released":"2020-04-17T00:17:02+08:00","url":"/drainer-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"bf0346a9f19746c7848d0790db2fde908e838618378c9c1b61aea1e91a0eb14e","sha512":"a1bdef3fea40502d61d2e2400b978ce119030eba63334401043be3867f8c8ed64f95fd50efae02df71511a89391b564669fc0f25fc5698340f22654242fe9ede"},"length":18339094,"released":"2020-04-17T00:23:16+08:00","url":"/drainer-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"57f3b5ee653e4b0931c8c5584ed3b659385bf341492c98463362a515b6e003bb","sha512":"f8cf11a1e3d1a07216fbb8d163693db7c158eb08fb88cc45df9c26a65affd64593d8d8ce7531d84f799bbbb469ec5a49df3b0dca82ceed52b29e985e6b739373"},"length":18371937,"released":"2020-04-17T00:30:30+08:00","url":"/drainer-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"349f9762e093db2f56d1a43157d3e111b2f7b12ba334c66f9230a1860c9d5ebb","sha512":"a4a3207eeeb800ac3de1b3a6b5fb13de80f7de4b3be7fc29b1f8a515b25c74809a3e128dc68dac7062c370f831bcb3223198a48fa466ad32bd1a000b134b706e"},"length":18858501,"released":"2020-04-17T00:40:05+08:00","url":"/drainer-v3.0.6-darwin-
amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"4faca2eefdb920690649a88f01ea513875f9d2623de678ca7c886c825c7a0619","sha512":"6598a46abdae1bc70935c54a877b626ffef4ec5d0763a77bb5028767747b9f80717b5d9707499f6fa4e8afb601a356ae349bf42fbe8069cd7c266db05a810e78"},"length":18858668,"released":"2020-04-17T00:47:03+08:00","url":"/drainer-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"f93589788f682342765d66ef16fe422246051e5d482feae5192ae4b10398f78b","sha512":"cc366242ea36ece05c99133b903ae8337a944210505da561cc9b2fab5b5f9c006cee2644a74654f2afd913eb6790b2fa7e64c5f8ea8826ad78f1776ffbd04b0b"},"length":18858658,"released":"2020-04-17T00:54:54+08:00","url":"/drainer-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"29579775e00bf11a24048da5ecac5e5ab7174fdf5de08ea0505e47794524641b","sha512":"1587334b090f0e8470a97ae1f7160e5f5ead6e9729237ac32df2e946a63dd3f6058c43f68c6b493339d76bb5bebb9adca2b398e0a15d2a534ca54cea71b51bec"},"length":18892378,"released":"2020-04-17T01:01:30+08:00","url":"/drainer-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"fdad20d2d27215fc4459063ce46d7b516e81a1d622cfd8c58387643ebfa92665","sha512":"bbb356a847bb1d4f9ef4bf87f45369c0bfcd57af2f30f6b98e562ffb871cfece18192d5a3729e91a81406b3b1508dffb34a359c0ec62b45d035ff32bf62e7caf"},"length":19852078,"released":"2020-04-17T11:10:03+08:00","url":"/drainer-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"7693b71204cfe60ab6f11b329aa32d24d627c76d4098bf8da3bbb19d2ff8e223","sha512":"fe76c6418a57ff73477c2af427e3334503a750ece740345a4d9b59029f284cac5ea4c344c293d4604a32cd7756792d633ede7f202307bc25114c61ea333da0a7"},"length":21913748,"released":"2020-04-17T01:22:42+08:00","url":"/drainer-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.
0.0-rc.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"96e3b8a5d0e55f93b75fa22599ca9e34f6c375340185d903141e602baf2ce2d1","sha512":"bf5fe73863f3ef91c7e86a566154b5f7ceda84e890c502a6246b34eab7c751f5361315af64b01059c4c00888d2334855ab9475f62a354111578e4e08b1a89c5a"},"length":22070027,"released":"2020-04-29T01:04:03+08:00","url":"/drainer-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"2dc5ec1810f7c5707701cbcf24ff6e83017ed0978fde27d6ecea18bfceaee0b7","sha512":"9624d3f9ade2e88d64a4ae2baa3e75516bdfb113862f57ccd5886a2cbe6ae4f029dcbb0a6772713dcb9658a1785dd1ed13ccf112f2f3b20d8bbc0fe5ff7abef7"},"length":22072794,"released":"2020-05-15T21:55:18+08:00","url":"/drainer-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"43d85c9ad200ccb7cc308d0ee415c032153524a256c2c4f90752ba38801b55e3","sha512":"367b14ce9119b1f607d0229507298ee9cdd1cd6cd4e90ee125a97a0978ba6e2f239eed445a1b46a43a0c6ca8a2300c2407c2acde8688b8b613821fde8aada0d4"},"length":19036282,"released":"2020-04-16T16:58:49+08:00","url":"/drainer-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"6385a20ec25e1083248dbb7538ce80283536ccd367d24df4853f2d95a527e2de","sha512":"9a90056f85b1bd21e0b01674bba01ca67b7bd89b47b24543f396e87f3cfc4e5dc5ea284b26e7b566b0427695c0c096cad3132e63b811ffb6a29956c2a3ec30bd"},"length":20591706,"released":"2020-04-16T14:03:01+08:00","url":"/drainer-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"8d7ed8a7e45b1239d96d7551ce5dcc8797c66eb8d886850b0bee71b274d88034","sha512":"39dddbfed6468fab7765bd57c5eb18249e01ce50108373d00ca10c80efc1acf3ee222d7ba49474194fb3d4cba3037b7bb642b8800e9d6f63fde24ee757e13880"},"length":18573082,"released":"2020-04-27T19:38:56+08:00","url":"/drainer-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dep
endencies":null,"entry":"drainer","hashes":{"sha256":"26b33f8a20e664d2a7564b6fa85f767ea34caca979dbe7616a41f78ea5feb203","sha512":"9613bce36931db33deeda7e13e365e83e263bd139dd734641197c76305f32035283e652b6684e1a036c80f321ce5834c95379d8a6d2946e69ab202bcacbebf69"},"length":18808148,"released":"2020-03-27T23:32:09.258485+08:00","url":"/drainer-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"6283ad5f438db8dcfffeba19c8bc226c58568edb84753081d3c277c75d47ace5","sha512":"db2cc9e19819922413cc3d2778d2d80d88886e54b06f8dbae9f1b89a2104b627c7fada07941df1454fe07d7d4993ea4884a3dfbaffe812e03e30de8f11134837"},"length":18812079,"released":"2020-04-17T01:09:52+08:00","url":"/drainer-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"c01126ab8bae1d99b55d7909879d2bd225a377691d55e7143cfce8808fe1f843","sha512":"22f9636b7e5ce36345969551e1611397f4cd55a5fc204f58325e8507131d2e7b43c853443829185903f6f27ab6b6b11bb7f432782c3149727562097333b849f4"},"length":18811987,"released":"2020-04-17T01:16:36+08:00","url":"/drainer-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"59e058bfe8f3ef935fa5fc9cc7f6a61236900254fa20bd51d180a244623be359","sha512":"0dba95654c45fe0436402d7a5a08703a8ac47fb8596d4e8031e599260a33d2e9e49ebe262c5cba1a27f87bb177d1a754775a5e1b988b925c10e06ff6854d1747"},"length":18834134,"released":"2020-04-26T17:23:12+08:00","url":"/drainer-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"a9ebd3ce4c325c3851052be7814897c6aa1782387109668255f6b1fe5b0327ed","sha512":"d812554a452cb0d4809b8486d91bfeec11d938aa51025c996047b36eb278950ff9ed741e275c870f4bd5a6a6874453703194b3a4ddd2a6d0d7a22452450f5ede"},"length":18834116,"released":"2020-05-09T21:12:07+08:00","url":"/drainer-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"drai
ner","hashes":{"sha256":"eefe782513a313f98c953f1b0da2c5e0f3a8d98169ac774155b6ce4253a0458d","sha512":"87997572d413106904c1b8cd5f26c645efab78d98967051da22033a5c92b00a2383078994d526cbc88b90a8a79c2dd1a4114ab9719e538d97556d48cfdc5af54"},"length":19224971,"released":"2020-04-16T23:55:46+08:00","url":"/drainer-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"a3470dd271c48ded595a47b8ad072c7edf8369d36ca676539ebd8a4637cfcd8f","sha512":"110752ba524c150fae8069ed01058e226e1a34c95d8b4acd37e0e9eecd11af99844383895a5ca0b6b2a98f751f50f206269a464d1634758b94073094f08a77c3"},"length":19210036,"released":"2020-04-17T00:17:02+08:00","url":"/drainer-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"cc2dd787249b83a0ef0e188f0534870ebb038d1d6c6cb6bf1954697e1436c32a","sha512":"f0262173b8a84015ac929b90d85dc68b7a3acaa9baa764e40208925893e648147ac0854a4ff0113df0baaa349192db35ae23a47db57744d94b5211b2838fb224"},"length":19190350,"released":"2020-04-17T00:23:16+08:00","url":"/drainer-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"7959926aec43a4edc9b879c2f82ab13ae4cc7b20616a95d0c4d78a64dea2fec2","sha512":"f10ad6a6d64fb2c41692774a10e77a5d4e35ced17a7d53db35dec6d2ade9faf821965627ef0ec81151abc0b69d062522e0d326b937021a7efab5f3d0ea19864e"},"length":19229789,"released":"2020-04-17T00:30:30+08:00","url":"/drainer-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"63409f29f6eb7c5549930fc17aaad2d6939d43ca762cc206dd25d9e126c5894c","sha512":"f792fa9f1b06a6e225ae7559e4ac4bfcd59439bce9a41010c0055b5d13d4eca31c003a09dfbe4051b2569edfba4010f4dbeb19e8d0e2d0c1eee6ad51e803426a"},"length":19017232,"released":"2020-04-17T00:40:05+08:00","url":"/drainer-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"2e4bbf6cacd30f814752
cd47458114b5d71ba166e99a04611c0e53d6d658c998","sha512":"8ff48498ae615500263ff3485cc37904bd98451386e68e8861ee13fc715b4d876902605210b4af41e768a6b25079eee169700ea51e35a5ef7cc4d0df60543e2f"},"length":19017236,"released":"2020-04-17T00:47:03+08:00","url":"/drainer-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"f1d7e1f0c9d2af77f2e52dcfc5e2006144f32ef7ad6f8fb09f1976a0c18c20a7","sha512":"3d4dd4f7c548e0156bd0b4e5729a93990a30e24a0da05ed3f7d00339b98b707d4bf741917d61cee89100ab44faad92343bad18ded5741e6687cfbc18599b2b03"},"length":19017239,"released":"2020-04-17T00:54:54+08:00","url":"/drainer-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"c0bc7df5a5e7cf9c084b2f1224be66f3dd81d62fd389253624c6a52f94ee9ba6","sha512":"57fc0b6ec8507c37fc9de4abefafe1f5af011f3a0ba1a39ff9cbb985f8067400f76efba6f7742783234d484b24bdeb8c34438eff5da72b40e15a9e4efb95c555"},"length":19047038,"released":"2020-04-17T01:01:30+08:00","url":"/drainer-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"17aa50ca6f67cf4e0a01a949d9229ea34b9031564c38ba380ca2279d89171d1e","sha512":"060bbba2f287a9338402821ddbf402eebc33c4ab59ca696c5b48b6ee4905364ace232433702f71e9368c46c806e82575503cdfc08390e20b6644c5c4df9a616c"},"length":19250607,"released":"2020-04-17T11:10:03+08:00","url":"/drainer-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"184c8f3d6778a393b18874df341cc1c4eba6f95ec5edc63ed92f7075f5021b07","sha512":"f4bde2cac469e2076f73792b3d11f82a98a21cc6b074aa9dffc281d1bddc74890259a7232498ea60a2e4021e6b9844e6d1c266854bcd8fbd9a9b50bf4ace7336"},"length":19036427,"released":"2020-04-13T16:07:28+08:00","url":"/drainer-v3.1.0-beta-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"5de8404e49214775665408668f881098d43f04006a36942f
11892835b540328a","sha512":"12681fae179675a9ef66e503f3ec81fc3882a9f2288d31a5a9986abbf8976f36525ca1eecd018b6d9e16baf1bd288e7c0c5e649ced5829d52436086f8c60c17b"},"length":19047098,"released":"2020-04-13T15:45:12+08:00","url":"/drainer-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"5be8ef33692f4b37944ea8ef592927f8ed6752fc4cb68e1fc26a68ab43785c87","sha512":"f55d1cdafe5f69ff85898fb073250f8e58b6871b8d1cbf39a4a11191ded4dc4054b2d70a8cd55c509189aa641c33a055ca02dd39fd948163185914195ae584cd"},"length":19078810,"released":"2020-03-27T23:42:02.403746+08:00","url":"/drainer-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"502a33d7acf7358ddeb2180bef1977293629a733f16835c1ff6908a392929371","sha512":"6367c2804a4f4a3693ca67f7b7991ae0b31503b53644c21d29693dfccae00c6666acd22a9fef81c9e151350847282bf8b3306839f8b8e70c59296921a46d3099"},"length":19256422,"released":"2020-04-02T23:42:54.0280156+08:00","url":"/drainer-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"4c92843518e8e50bd4b6785009f11598867eabf13975dfcae3a946b275bdd257","sha512":"b99298e5f43856382909608dc7066d170e9e1d59286a08a0e6de69d4b7f57e87906b302508498da9a736a0a018cd04d9a94af5f5610b3db289c9b221bcea53df"},"length":19250624,"released":"2020-04-30T21:01:54+08:00","url":"/drainer-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"4734395ed2a5eaa16f4f735cb2cdf619491e9a90a62225caca4471dbf535dccb","sha512":"fa449301b1fbfb954cac9915b246de0df7ec288499289cf5d7d4e30de6dad9bb5b79f1b48c054c0a1804f68ce77e5f9849f36ab0ecb9213fc86d11b0e0c3be07"},"length":20659510,"released":"2020-03-27T23:44:49.967188+08:00","url":"/drainer-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"43f7035e4cf7f888485d97c26a1
98eb9df5e9f9252b36f3a1d495153aa250e87","sha512":"4814c09faedf472ce679acbc5e2f8a31284fe3695a3ae49eb80dec22c17ad15d5f0c92fd490ad3e9b562435361c4b5c2b4d5d8a3393a184a9071f4bcf0a19e6a"},"length":20686785,"released":"2020-03-27T23:48:19.54029+08:00","url":"/drainer-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"3dc49b75d3dd1b06bc8ddf30b8ad191fe658829ab99619af67bb2d7e9069a4e4","sha512":"cabe021934051d59069d4cf9df55fdcf7af8ab59ae7b837d1fb5a11847d68471554dc541aa86c8cbaf774858b21c8a4a6cf9667b98a43d320073d98dac40f82c"},"length":22017911,"released":"2020-03-27T23:52:07.12562+08:00","url":"/drainer-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"e813d19cc6939b4f9ad0c78907abdce32a7ccf576108170ba0e34c306ac0d9f1","sha512":"46d1392e8382ec44d5e079d1fbc826a3816d7d446695289349c7087958e06bdf93d1d7a0b9c2a27b0aa575f7de028f36cef2e73d10918e962960bb08bc1410dd"},"length":22109733,"released":"2020-04-17T01:22:42+08:00","url":"/drainer-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"1e9ae21e2a60e2a12e02dbb22b88a2cc36caa5875400e4d66adac49de347faf4","sha512":"033565d85f63acb44c9eb50e34c35870acd2c4e3a2ea56ee92d616778e666da84f14b4ed9a2b02bc5eed3b782ee0c5227e5c2e22a0559acdfa2e8ea190514158"},"length":22237382,"released":"2020-04-29T01:04:03+08:00","url":"/drainer-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"fbecc28f6e6fa878be5579e5801de6b6f08a244e9fee22cce38376c8aa1923be","sha512":"11ee7d0ec256633823c214b1fde6b1348f878b4ebd17baaa55832a06272f7c73499afb11e47e7c24eab34acd66e3c407077dbc70757279c9accdcbf66fbfca96"},"length":22246769,"released":"2020-05-15T21:55:18+08:00","url":"/drainer-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256
":"0b216993e7e1e5dec0dcae021271509ace3f83713dbd72167c85edba86cdb309","sha512":"785361119a8814303f4d707038cb16b8f4d5b122c1f20ac65c6a2a0f7799ef490190a4f58b2d119b5f6e4404d5804ecc9872b8886808f4d9ec48cba285da130f"},"length":16735158,"released":"2020-04-27T19:38:56+08:00","url":"/drainer-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"350480a32c9f0302991aeee81a1f3274be93ee7077509fa45b9319823ff4c1c5","sha512":"37c2e85819e79dba67def996f3d406c5bbd9a51654ef93e849f242ec073c5833c06960f31cdb264be347272210dfbb3ef85b506376cf1b67534b9ebcbb435fc5"},"length":17601211,"released":"2020-05-09T21:12:07+08:00","url":"/drainer-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"748bf6a470b208dbc521ae9d9dd5e712ffb38e6f900fbaeba3f49ef60d1145b6","sha512":"605424728085fd8705a032e664d0103794519751219b32a38e767d49463cae6a8fea8600b91048ed4eb0f4bb3ef168b8830e88595c5f9acaaecab442b070f8af"},"length":17992298,"released":"2020-04-30T21:01:54+08:00","url":"/drainer-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"081155a512e283e8b554680567d3f07d8ae1ad751d14218cf92128d35d35ee20","sha512":"76fc3d5777a685cfd12520c0c111da353e0026ef5646fcf71a7396d41fdcf8ac9e99d9dcb70d04ae725f08aa2a02ef6660d3768938a39994731297cb3d896940"},"length":20572418,"released":"2020-04-29T01:04:03+08:00","url":"/drainer-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"drainer","hashes":{"sha256":"0faf9a0174b56407313ddb6dbd97a6b3d3612bfacf82dc6d0f5b3ec2fd452930","sha512":"2cc1cc0a076007d7a8ae833826cd6649a5415ae809854a6ed6b159b022eb813cd3cc385aa04a45632a94e98ae8ffce9a3b3d4651939088008e962a9dcdef95a1"},"length":20572420,"released":"2020-05-15T21:55:18+08:00","url":"/drainer-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.gr
afana.json000066400000000000000000000625101505422223000240150ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"GKTrFDENTS2BQjYONxtyF5FD9Pge9UOIMSuxJNhCZ+4FsHV9xCEvlY7spV+ruXGcpDGS0SOxhWx1h91MtDOkhc9Fe5Ywup+zoGK4Djgxc8yABhX+Mn1TxEk6ig0a9ct/g8ZxhWTCXifnNso/kC3PUj9proaK68O+h80yws0d1XOWXUZm1/M57yS+M2bNzUbliaD94a1R7PiJS8feTyJ52E0AQ+IBXSUNqrZiromYZkocPcOU3XsGbOVgkuxL7Ca4r77m4JLV35A5lJCoBKVxi0OAUthgdptkive8L1jecOY1kUQNBNPlXsgc1JTsTTxWajtOZ8uar7mTWHRgsqThdQ=="}],"signed":{"_type":"component","description":"Grafana is the open source analytics & monitoring solution for every database","expires":"2031-10-21T08:19:55+08:00","id":"grafana","name":"grafana","platforms":{"darwin/amd64":{"v3.0":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"b21d9fdbe9eebb1eb761fd2a282b89d6d2f1b7f816582cb1120eeed236cfa5e6","sha512":"463715f9409f955e3ba05c17b4c492ed8975b3ac3e9c437d8f95553f01760ceff7463eb20be8b27fdfb5042365b32d25be8a79702982e1ab85928054d1955acb"},"length":45832397,"released":"2020-04-16T17:02:56+08:00","url":"/grafana-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"ed472ffe16e06cef87729d17c9d83c178aa69e2c79d9063c1ba387c3c958c467","sha512":"dc630fb03a0729167363d634e01e028aef181148ff4743b6a74a21147f95739e8114baa56c94e22dab2acc68802d9b82cf70e29702f38014837090905816f985"},"length":45906785,"released":"2020-04-16T14:06:37+08:00","url":"/grafana-v3.0.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"9cec7af72309f7836dcea0d79911285314706a7f73e770868d4667ce486dffe6","sha512":"9e621cc048ed565df30d93b1185165842c017725ee98a5c95267d20934deff11fce1e139b8f1236df3c737ebe8348ceb37450429cdcfe2e9e6060ea39fee7fc6"},"length":45915688,"released":"2020-04-20T14:34:12+08:00","url":"/grafana-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entr
y":"bin/grafana-server","hashes":{"sha256":"39baba1cfd4bf59d78dc9c726fb7b040c89ee9c6d38bdb75861f6bbc0e805268","sha512":"20f3e1e6ddadb50024a9c11b9724c02f6035b8044835b716839c7a6a588a8cb17be3bac809fa47c00820fcf2247b7b6efa3a3afb033a2e5ed9cf5eb032a53559"},"length":45832064,"released":"2020-04-07T19:48:24+08:00","url":"/grafana-v3.0.10-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"f1209bf70bcaff7e5061fcec5bfc30e17c4b9bd2777edf41c409dfd27c5aeed8","sha512":"377d960da38cbd0e0cd0a21b0d496777c8f2cea104efb062741eebf23d8109cb17741f4d858154d4d2ebd92cec3eb15ac2b6c607b69e242eabcabe5526e50fb1"},"length":45922290,"released":"2020-04-17T01:12:13+08:00","url":"/grafana-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"54a3b9b2e209010b0680acaa9fd7fd243b17e70e5a2137feaed77353da1baf51","sha512":"7c40949d0ca4249e5504de4d85faa119525e111c0effaf4b0aa8643e693ca64b500770fc56f204f96fcaeb099386dd267c0529bd0aaf174fd2666c7da0b19526"},"length":45922590,"released":"2020-04-17T01:18:44+08:00","url":"/grafana-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"b0043ce3d7fbb7d7f0fa52510f5c77d97ae8dbea355d01f2d41017204feb6434","sha512":"9814f892a6def1624681332931cdbb5c212e0c630e76b223c90176df26e466dc3323714f0c3a84a91c3e0dbccaa621771a713a70d83695d41827b667d74fc078"},"length":45922621,"released":"2020-04-26T17:33:52+08:00","url":"/grafana-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"8228467226023a2af4a2fadc07d549b49780c16b0de5bdc2da4350252aabbb70","sha512":"a44a614cb7c7826e1e345e72a8f8d17a152acb769110281c9fd35feda6f08f30fcd4d4c6d0b5bcee4273dfcbfa8419de358bcbbb7296f0e0863de94334fac559"},"length":45935820,"released":"2020-05-09T21:14:35+08:00","url":"/grafana-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"
dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"ee9c9278facce72cf1b747d3e61a8f1cd9fbb60b3f6f3c84a695a1f3a3a9b42e","sha512":"8e180a0716b70812a3c80abe4a30fbda341c3611db782b6624b94a46b5d694cc3bc10aff6e7c3ac516fab06657f95724ea29351fe9bf2fec7c5f458deaf977b7"},"length":45916264,"released":"2020-04-16T23:57:47+08:00","url":"/grafana-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"393f228dc3e5786652d6e0fcb2ea8111604c83e8edbba87bf266facb622ae185","sha512":"2e6630e860a31eaeea8ad41d4524f669efc825e976cf3f1588616089b969225e63aa29ca68d5b85957e82434bf49ee710c33fb6eaeeb94021eb23533fe58b782"},"length":45931679,"released":"2020-04-17T00:19:05+08:00","url":"/grafana-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"7daa13b1111c9e3ed4eac2b8bb40649369905562346518a253b560c245189996","sha512":"ba6e684effadb98da7548a51ab12ae8eec9a5a220bac4aefb9d60606b349fb8dac36dc083a28d112d6f09975797229a5b6754ac7928f8249ef6ea52b3928fc1f"},"length":45938172,"released":"2020-04-17T00:25:35+08:00","url":"/grafana-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"484084edb7e91835b9b14424d552afd54ad362ae967fdfeef1fbba63bacfbcde","sha512":"afde140311870c24ba388f08a9fd001658cd25ab4b26ccb8e7ef46cf2e9f756f974172f97404fa9c05df3736bc4e25ac26fa64ae7d5467d05c6bda346266f10b"},"length":45923235,"released":"2020-04-17T00:34:43+08:00","url":"/grafana-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"41027fa95e2c6679699dc870824f61994b10c4e67097f3acd3d38c2f27f4322f","sha512":"bded82a5540c241308be1554336a139f8d03b14e6281b401bc826de6ca21df91da8179d779304df11f2a1d5a9ed459fffe2812be3d20a111799c11db3a92f4a0"},"length":45923116,"released":"2020-04-17T00:42:13+08:00","url":"/grafana-v3.0.6-darwin-amd64.tar.gz","yanked":fal
se},"v3.0.7":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"fe1a3a3931f00339848385f5f82a9bd6e9dda1e8c2fe0adb0b52856252a92d22","sha512":"facf416b5bea60d973af8b21b815a9e19c80f594630a3c382e728d23c41a226ae039c5a0d312736747bc72a22b5cd6af894aa9b7bbad067872703c7474160c35"},"length":45923209,"released":"2020-04-17T00:49:08+08:00","url":"/grafana-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"26ac462abb3bd9fb510f3c14a8db5f3c238accef47828d8a901ff901194b9166","sha512":"4189c571f9db59c362388144f76e5cc060c7dcbca83fbeaab090ec70da1778745ddf8e17a6176ce2cf8edcac3b6cc4a96d2bfb307e17095652a3ad971d2fbb76"},"length":45923921,"released":"2020-04-17T00:56:56+08:00","url":"/grafana-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"11504cfb7e6e7bf30b24fc98bdda4b90432511e5202513e1a969ba2f743475bd","sha512":"a68649ccaa7eda14103b1ee307f671a586be09285095a9864d64f8ce5906c7d0d613e60e457a60fac11f8ea084e59f5ecc46024deaff5e8d982f2641e85231c2"},"length":45922359,"released":"2020-04-17T01:05:00+08:00","url":"/grafana-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"9b7e8937132483dfbe0bd4047e379bcdd86b153a482b0c28ca46a47d3e3254ad","sha512":"0f27489060a9a9092d67e12436d11afe429d7a665a6e4cb26a878881a02345992f39d316637978825297d450dad32bba0a2f5ee1c065aef95bf2c943ab5a00ab"},"length":45925780,"released":"2020-04-17T11:13:19+08:00","url":"/grafana-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"be3496f770122dc44c60b5c72c68f2058e90c56eb6893f9b215478371365416e","sha512":"c5f1fc8d8da0520b350726d3d8061423d664cf4cfa43592f683a564c39be214d023483c566250eb8549c04806c53356011d28ae0596dc46461ba7303293a4d03"},"length":45922715,"released":"2020-04-13T15:48:11+08:00","url":"/grafana-v3.1.0-beta.1-darw
in-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"68f9e5493e3a43de4d9d348015bf3fde412a00feb9181dc37212cb1646003ce0","sha512":"456d176c3e05519d848bf22738dfeaa5846fd8aba712895818bba2a04aaed2e2c4910af91b169fe234667d499b504123cd9ea0e7166302ceae3491f47756d295"},"length":45926074,"released":"2020-04-07T20:00:29+08:00","url":"/grafana-v3.1.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"96c4f5f0abbec40190ac0d7ce2ea63ae047bba7ede006106eb69a21ab247124b","sha512":"1da7ad09acd231da127b5f37ffd71f2fb8ae41c4713132a819b4cc1199e86c45b2d19bff4ab68b3a3049ebdb2b26b3bb103eaba70e3ebe067d5f6ef0712749e0"},"length":45926307,"released":"2020-04-07T20:06:08+08:00","url":"/grafana-v3.1.0-rc-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"df24c12f429f8c90b86ba71327fff7a053271bbedc2c87b502056a21580d51d7","sha512":"ccd0585847e15296c992dd7d8f8ebe58a320817809150739e151c6b3333a571cf1d52cedf616b0bf08240405419f781b2e41cd2fd212dabeab9ae873c806afb1"},"length":45964543,"released":"2020-05-13T11:31:54+08:00","url":"/grafana-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"cd594a5f717343034ebc513d47259405f81ae4f163cc7247ed21a36956d208e5","sha512":"06cdc86b12e18e48ebd98eaba23f8fa9ff78e9d5998a284203faeeafe9a7524f97133d1eb0312b3b17841eb2dee05994f543be6c3bf599cb03429fd03a497075"},"length":45937443,"released":"2020-04-07T20:10:48+08:00","url":"/grafana-v4.0.0-beta-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"8842caf6ba6bf3847ddb40ee63283880fcc47cfbb82e74a65584ba5c132152b8","sha512":"186365befbd0cf41bb7dcd3f3bcf89b1adf4ac2c15556d3e10512ff93e61d46fd7b1281e956d7c2a46ea3e5dcaf3067208aad5b2d5ff6e034468f6ee59efaaf9"},"length":45941147,"released":"20
20-04-07T20:15:13+08:00","url":"/grafana-v4.0.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"57d8fca6c3ae2c109c0bb62025f4138d695201e150d3fe15d9819d746d57435c","sha512":"e3074e7ba0d313e11fbf7ef505c462c8688d533089099cf55c69351ff9315f5f872c860f8d94c9e95c09d92a69ae075c3546b2acd71a9d4561f809cb089bd15f"},"length":45941181,"released":"2020-04-07T20:19:18+08:00","url":"/grafana-v4.0.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"7311c713e40b7499e600809822f48fbae5b6dee0a5725e27742d0db928e21464","sha512":"1a1445a2ac0ff500f4cb015e3240bf622d7f1a0617bd7a5d5d0f7c96dfff5e6c509486862425bac10157ebd8eb6f87e188f180d4b5265f5e19494f9f43338292"},"length":45941029,"released":"2020-04-17T01:41:27+08:00","url":"/grafana-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"67a611519826bc213067c7c0cc51b1994e8ad52c7c542f0f0138d12cbe68a237","sha512":"72e9504f6cb8886716ab07b303a86fdd0c5c5986b03107fefb4c475eff4456588f510cc076f611d19792f09fe58b5920f8df9d39eb58816299cff402d60e3844"},"length":45972633,"released":"2020-04-29T00:28:18+08:00","url":"/grafana-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"7786e858d2b8bb07d0ceddf25db929167bdb349bac46c9eecfde87ddc39bb2e4","sha512":"d2de93b1cfa0c2fc85c7c5b1d768d9c6caa60e0a23c530c272fb4d65790287437f244728dc131bcfdebfd9c23b49236632535ad8fea460bcaea2c1ca52e49f6f"},"length":45983666,"released":"2020-05-15T22:13:37+08:00","url":"/grafana-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false},"v6.1.6":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"ef5050c4038abbd209dfc55a45dbf7da80e5450ef78229e05ca6863d80d493ca","sha512":"6039e9d3952ebaad582a2458d5786f4c1ade7e99390a911b3a6b26aa1fcc211fdcccf427885f822794795543483de8a46397b
84b584d7ca7c84c59780c9d1877"},"length":45931947,"released":"2020-04-07T15:43:05+08:00","url":"/grafana-v6.1.6-darwin-amd64.tar.gz","yanked":false},"v6.7.1":{"dependencies":null,"entry":"bin/grafana","hashes":{"sha256":"4888e689275f1ef0d4d3051969350d4078b6bcc6c47677dcaa42f7e625b4af91","sha512":"2b4d214089b27549c6b7542cf24afc20e5ae489ecc9e3c2cca0f2c5aa11e08dcf8d54fa32c63a0cb12fc8029b70ff25b6517d60aee58dcb89663bbcfd06eec7b"},"length":52233848,"released":"2020-03-29T15:06:39.282001547+08:00","url":"/grafana-v6.7.1-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"71e855e742c9a381d3575bebfa9b404baa52002943fde768fc6c1375bd7bf4a8","sha512":"896fcc89c868a9d6af992db4db103aadeff35366efe48cbef4b0a9225e6895760de891eca9f1f0df8dfc4f559a119cde5f4d447ab7c7eb2c40d557c64197ffe3"},"length":56707818,"released":"2020-04-16T17:02:56+08:00","url":"/grafana-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"42c391493f8b2fe57d5ffe43c09d6184473c1782cc0041a16359b183fa08e867","sha512":"81bd41118bffb787876e8f86b529560154e901be8e92004bdd719d440f253084170958d89e088c4b1783ffdd6575f57c0591ad2981229571cdc3a894a42b431b"},"length":56781119,"released":"2020-04-16T14:06:37+08:00","url":"/grafana-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"258d59b184e174ab8003f0ba734574fb81fc266b78595a513f94432de51a1003","sha512":"5c749da51e28f47e280e59db461d6e7586a63628b68373c861fd86116ea981d5c123278ce53590a5b9357831870c30ab3358ecbb05d92f47c6c362655b92cfc0"},"length":56796011,"released":"2020-04-20T14:34:12+08:00","url":"/grafana-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"4f53b53807a82c55daaeec27b5950e73573c827e70b774044e33bb1575b84f70","sha512":"07464ccb80bd7733ec259302beb63fd4f7c69146bdf27a93a73da6f297199ee58b8630700
f38904e0e5526c1171f530c2df0d653ea4d7ba87190a7f91ad13c66"},"length":56707888,"released":"2020-04-07T19:48:24+08:00","url":"/grafana-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"d1187b6cb5bc739af2f5cca87719b80e0b81becc935e26aacb913ef4db8842fb","sha512":"5f58b07711fdc8a01f8a62858a4e6eab0cbc171275df0d04c1bb201bcbad817dafc8d244ed571fd95879e443126f866a3a1eaa5fbbb5e14804ff8804799dfac8"},"length":56800934,"released":"2020-04-17T01:12:13+08:00","url":"/grafana-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"141ca36574600e251b58cd605678167c04fb517f8a1c8ce0a780f9a7b673277c","sha512":"40c0e7854e18f5f9f752fc023f10d5a21fa93bd5ea09f92e98996ec5e611fb29b6043c62cc9612a6fb2128984ebfd310c08305fba9e8b08c5db5bccf729d75b7"},"length":56801972,"released":"2020-04-17T01:18:44+08:00","url":"/grafana-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"dbff7a3c313be371ea293a04e124b86f34fed47abcc796738d60fa591576fc1e","sha512":"b7c0a3605e7f3cbba057f6dc02b94b17dd164c84b2717473f75cc08881e059d00039d3bfacd8c0559042b245a836828ff2efbc8cfedf0a7e41d76c49f07ea9ef"},"length":56801971,"released":"2020-04-26T17:33:52+08:00","url":"/grafana-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"1136ceb4859423f1f70eeacc9cc58de785f3119146ca463b4f8acd5af055529f","sha512":"e52dee9bc08a5d7ae3703851d84c57beeeeef3124dfe0831377d0adb4440cda978908e46fd197d29768b6fe97c6cbb8c3a61b00f1a82adf54b84b87a29e4c2bb"},"length":56809868,"released":"2020-05-09T21:14:35+08:00","url":"/grafana-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"41e3d3cfa7a848b0a5f6ac7151cc046952b9791aefc36e3722e6a5ddb6621fa5","sha512":"0af6792654d111ebf8c537374a13f7a0cffb260a9b86728fea56e6
a8a5e090848a8f49672da8354c39bc479366030c1cf1cad8579f933278f63130345e122ee1"},"length":56791558,"released":"2020-04-16T23:57:47+08:00","url":"/grafana-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"8edb62f695ef86c8d68c1aec561f9686e21468258c0f7be9c0d781b824972bdf","sha512":"fcfc7165dc38e94329c9dfc0963666cbc1668b7ee17f9c0eb69d825c16846762451c20de7fd9e54763c3986932db41c8324d27db068fc8e7571dca0c02303832"},"length":56807830,"released":"2020-04-17T00:19:05+08:00","url":"/grafana-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"45704f909f1268beb20254f39f426101f341f439198fc31bad4aa6a63126cfe6","sha512":"67b043d52b0aa2c5fec2eebba49dd521f0237b32c8642285c552c96cdfe42c382b06dbc64464b791516967b7bd3d1754196240d2dfe68ecc90c0d4f263c1daaa"},"length":56809208,"released":"2020-04-17T00:25:35+08:00","url":"/grafana-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"46307453b729d52fa89e8b3a94d5af48a2dc5338015ee165a06fb92ecf343b79","sha512":"0eb6c128a60d8dcac6efb6d90ddc74dc63b4e36008c4b58ef504508f74225f20bd35c964b065e426b222ae28400dc65b339b3c10007bfabb61d880a2684dd87f"},"length":56808861,"released":"2020-04-17T00:34:43+08:00","url":"/grafana-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"21dc65009aab562e8048741b06787214ce8e095523992480de6dbe180f8e90a6","sha512":"0a9089c1620ef7c5a1ec39de4b64d730a41081c8d7aab6bf645dab7af7ce5340fba7d1eaebc988f610808fb0ba71ec29c74360f87761351230f2449564913063"},"length":56799686,"released":"2020-04-17T00:42:13+08:00","url":"/grafana-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"91a5efe99b03436a8ddd9a1db8cd2cbe82e52d4c103322cefd36d7bffadb65d2","sha512":"e336f1fc5801fc847e5f86d45b96ce13d589f8ef752f
d751edcbefdf4dde3f148e6c84c78ef757d76530aa17d779c93df208f77932ed22465551ec7d41e2f474"},"length":56800474,"released":"2020-04-17T00:49:08+08:00","url":"/grafana-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"4963ef2266fd6b4dc3ac8d28eef69ee8d5e8c086d8c494addc7f7cfd32c3d791","sha512":"f4376fa1e30dab9568fa45acc0de721edd77705b6a74f2e1ff2f93fc056c82f97756702f5f8a090b07acd4eadbc0bd21889b32b52bda940c00ed4d0b73d1a263"},"length":56800782,"released":"2020-04-17T00:56:56+08:00","url":"/grafana-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"bc0e8be1f3c738cfa5c800e83019f12da518a8e90b2dc17303ab585835e6ee03","sha512":"5abe6626cf48a0f122062c8ea22eef86572f82a86e6093821e2ea387d8273729aab235e99ce622ab8f3b7c7b15d198c7d1a7290552813bc5897bf171749ce568"},"length":56800959,"released":"2020-04-17T01:05:00+08:00","url":"/grafana-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"8fd91aaf170b3f003dcb09d1931f60c7a14a885843aa9d918cdb1b950713023f","sha512":"1cce472ef0ccbdea8865fea40a7fe056a31bc3070d47749affd510fcc6241f103644eea86ada51157929845591c6921f78a3ce82c00c91b1bf16b5e062ebad7a"},"length":56801328,"released":"2020-04-17T11:13:19+08:00","url":"/grafana-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"151308f42735ec3f170ed826c453e62364e8cd0226223fbd73f73b9ce1d6973c","sha512":"2fce9e8d3f82aff445557c76078ab685c573eefce174a09246f513c540d35d92c0ddbc83735b9d96f78ee6c80419243990702309c404566ec05ee082978b7f6d"},"length":56800902,"released":"2020-04-13T15:48:11+08:00","url":"/grafana-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"ed1e0eda100bbacf5343b78b4931ffab9564ddf0711c0da9883382f19debf387","sha512":"e91bff2af3263
d26fd94d92082c603b644ee72684d545e065bb82b8e67d7a1a453b11f7aadcff1e2eeda93c34173e14402443e8994fe8279accae30f476c39ad"},"length":56800159,"released":"2020-04-07T20:00:29+08:00","url":"/grafana-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"8506c6959abff31a70efbeddefa3a553e9b6b46de907aca832d5138d676187ea","sha512":"e29cede8616f42eb7ed8b29004025be6c444bfd1c087847f0d3007c818083a4b5b0db464ee24462b6d4f26f89b02ceba311529933a1c955f63ff1c33ee8d5679"},"length":56801380,"released":"2020-04-07T20:06:08+08:00","url":"/grafana-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"813d24915a4ab4c818202bf90c5f2498eb4ac24f40080d4fdc89c7c453ff332e","sha512":"902db482b7725c8a56d7d1d7ecd38d8fb9775f1c5b8fbb1e76171c60e295ec9850085e86aab70150c8bea4b0c1298457ba3a627520e887024d5d88c0c1a554b9"},"length":56841483,"released":"2020-05-13T11:31:54+08:00","url":"/grafana-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"ef7005a3abcb2afc76afb6e012c28971eaac8fcba26939b51947ae18fba11259","sha512":"37603cc523eb9c200c756b572616da7f9bd5cb4173a56cc5503316bbb38c519619a518be2186e2afddeb64c903b002ae5f7468d1b912dac0b33b13d1dbfc4654"},"length":56809685,"released":"2020-04-07T20:10:48+08:00","url":"/grafana-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"418b8b28f9b7e38ef84d78868c3ca8e072eca35c59a997d20f2b392375dc8f12","sha512":"e8e7e26bd56e370fe3dbc334d91583a0143d97de0ba9a988775c80bd2ea06c4412fe5535180bf38baafdaf30cd57e95acdcbc1473a3ab863ea5a22a1343c1681"},"length":56811881,"released":"2020-04-07T20:15:13+08:00","url":"/grafana-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"824b88733dc2a9ec5ae511c859c0b915f85
3dcd583cb07a34467666078df9c6a","sha512":"21918cd20693c9be4a1f7cd4305bbbb0ef4780d6d320f85fbbf5219856bdadbcbb9a8cf6892527bacb611fea90e6246b6614ce2303e448e475b9c8dddf85d080"},"length":56812972,"released":"2020-04-07T20:19:18+08:00","url":"/grafana-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"4e5ba6d0f1e2fc890fac28080e3098d552eade1d7e6b039b3f4a156111a25834","sha512":"f29362db72edbc1a06742301b968aec0d29dad2f4f021df5c05c527612f119223ab90a4acf598939201a9091e46d58880ff56c964205964ffab31bd69731c5d5"},"length":56813220,"released":"2020-04-17T01:41:27+08:00","url":"/grafana-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"1a48c7fb14e94ffae34348710d29bd7649b38cae9211e4dc9c71f43aa5cfdb2d","sha512":"170a88c8f12caf090e02e8c3bf204b8b964e622ab7fcc3d7f99f483236185de9327fd192b5cf1c64accea1eb0e92f7bd0dec05de5e4e600337fbd9a38a801527"},"length":56845080,"released":"2020-04-29T00:28:18+08:00","url":"/grafana-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"255a9acfce21274ce37f9990d5b69ff0b907131214e65e1206e42136526bee21","sha512":"578c77c7383fe5b5fbb89613526775251be5e93c47f444f948f21fcfb45e7f86654dd9b3302a7d85fcce11f245fe15298c847937c4331ddacae94e3f4b74b448"},"length":56861258,"released":"2020-05-15T22:13:37+08:00","url":"/grafana-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false},"v6.1.6":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"6425910d4fed1f01188feb377cf154ad04b5f1dbee8904bbc75ff4f99e2d6091","sha512":"3a02d77064dcd88f9f3fb5168b7fb22501d3852f12348d552f2afe1a8e59226529e7ab0c965c395ed0fbc9eecab46e6c5c344bd2ba4afd9235922d244ac8847f"},"length":56810182,"released":"2020-04-07T15:43:05+08:00","url":"/grafana-v6.1.6-linux-amd64.tar.gz","yanked":false},"v6.7.1":{"dependencies":null,"entry":"bin/grafana","hashes":{"sha256":
"19396487c18f17723056a4a25492672a8059e2d7cecf3bc050a4bbebf1c9469b","sha512":"3be4cd06d902612b0756b48c828d4463548e145cc6fec5d7cdbbcab9861f276233cd5dcb2c1cac76ed0576327f565ff3971f2c534f53a2b784c49270b9b65416"},"length":63986275,"released":"2020-03-29T15:06:39.282001547+08:00","url":"/grafana-v6.7.1-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.14":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"f1d8ccb120350067ebf006cc9bb35e7caea0395e8585cd3261d57954f3739e8f","sha512":"a22beb699609c55f81dfa18506071a4f059c50412d35e8f4b20ca3ad1d0d38665fa85ae3075b85d2beb8ea557b6ff2db0f371839ee7a5c1b4b0a95ed22be3510"},"length":29092710,"released":"2020-05-09T21:14:35+08:00","url":"/grafana-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"17581cdf32c4bd4cfdba070880809a2bb7e1df039b38337722ffe2341253128c","sha512":"4548f60991b6abb52c46401e6df80d9bc60b83d3ede0f8ad2ad9a7aa15c44be0154871cbe4efc2d0518aec21c2fd213d57991dfb818ce8380f5b1e1be00266cd"},"length":29117915,"released":"2020-05-13T11:31:54+08:00","url":"/grafana-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"82ce12be65ca2ac9269b788aa6eb5e510bff462b6c785a2efe3808000e5b3d72","sha512":"b1074273518af3e16d64bc897d962fa94223ddaf471e6df8266ab40d5505ee926526ffe733e4404e99556347875849dda7003d004838a7f64884348be60f1aaa"},"length":29127735,"released":"2020-04-29T00:28:18+08:00","url":"/grafana-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"bin/grafana-server","hashes":{"sha256":"52d4534887a38b0950b3e9a98ed89df93c95ed9bf30b8008e4c491ff15315ab7","sha512":"45755e84f46827ff3e86a9e0915766ed987031685cf60e8f05c9fb5381d4a998df5b27d805f45a63af7a4ea398f72341a1d9ebab74e7cef004cf58db2a026d42"},"length":29136991,"released":"2020-05-15T22:13:37+08:00","url":"/grafana-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.
1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.index.json000066400000000000000000000057111505422223000235250ustar00rootroot00000000000000{"signatures":[{"keyid":"c7b9a2470b206ec002c02bf508b58df0f0a72474f27eae0e70a9836baa084f2d","sig":"W2+7qnhORZ9RZ2fnYaWQ6C6g641p0lj0JUW7Apt7XnUQmWgkGYnwPqMtXqKcsV0FmOxsBUBTYtu7fSW1B0OQPhCGlmfYFazmQbPoeEr+OpvpUL9wIOAgngXKLUrq1f2Co+C88kXiD8nlywHkOiyzb65RKZp1+UBKdbEBlx6FGu7/1MrEYn7lnWxXRqZ4+N0DoAOboRSfnY9N0yLxvzg1/GqZnxF42osUgUUHd0ZWe7AQdjpuBF7deSmFWDLK0ZuzftB7WcVpONNK5hHgILJd7J1R4WJqLwH7OMRugTZ4xz8RdcvsuLXHKIJeKij90sRmR2kD2xdY+X4MgMbako2qoA=="}],"signed":{"_type":"index","components":{"alertmanager":{"owner":"pingcap","threshold":1,"url":"/alertmanager.json","yanked":false},"bench":{"owner":"pingcap","threshold":1,"url":"/bench.json","yanked":false},"blackbox_exporter":{"owner":"pingcap","threshold":1,"url":"/blackbox_exporter.json","yanked":false},"cdc":{"owner":"pingcap","threshold":1,"url":"/cdc.json","yanked":false},"client":{"owner":"pingcap","threshold":1,"url":"/client.json","yanked":false},"cluster":{"owner":"pingcap","threshold":1,"url":"/cluster.json","yanked":false},"ctl":{"owner":"pingcap","threshold":1,"url":"/ctl.json","yanked":false},"doc":{"owner":"pingcap","threshold":1,"url":"/doc.json","yanked":false},"drainer":{"owner":"pingcap","threshold":1,"url":"/drainer.json","yanked":false},"grafana":{"owner":"pingcap","threshold":1,"url":"/grafana.json","yanked":false},"insight":{"owner":"pingcap","threshold":1,"url":"/insight.json","yanked":false},"mirrors":{"owner":"pingcap","threshold":1,"url":"/mirrors.json","yanked":false},"node_exporter":{"owner":"pingcap","threshold":1,"url":"/node_exporter.json","yanked":false},"package":{"owner":"pingcap","threshold":1,"url":"/package.json","yanked":false},"pd":{"owner":"pingcap","threshold":1,"url":"/pd.json","yanked":false},"playground":{"owner":"pingcap","threshold":1,"url":"/playground.json","yanked":false},"prometheus":{"owner":"pingcap","threshold":1,"url":"/
prometheus.json","yanked":false},"pump":{"owner":"pingcap","threshold":1,"url":"/pump.json","yanked":false},"pushgateway":{"owner":"pingcap","threshold":1,"url":"/pushgateway.json","yanked":false},"tidb":{"owner":"pingcap","threshold":1,"url":"/tidb.json","yanked":false},"tiflash":{"owner":"pingcap","threshold":1,"url":"/tiflash.json","yanked":false},"tikv":{"owner":"pingcap","threshold":1,"url":"/tikv.json","yanked":false}},"default_components":[],"expires":"2031-10-21T08:19:55+08:00","owners":{"pingcap":{"keys":{"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsRVpyxr+milHF9KYVAbe\n0bpfaXKtrPW75o8Q39bqKFML2BlW5b3kO667s4eEMXQPqY+dudHBRZp8uGvmdPUK\n73EO86tLKW99es6287hJXk241HYb2pEBxeWqPuQx/ogHvHzqBTderFBjl/EDxQJG\nZ8494YqsnJB24uMtUg5uID3Myn06m/x6kP5S+mG2yST2KtVAeGbZLxurpa99WGRk\nFWvf/OfSA3XbXBafRTbYJYzwtUFo2vqhdh3nPjjLESECaqVgOcyJoRHRxafROxG0\njAmJWrETPHYy6nLHkMmAc6A/I5XDl9yZRQVtcDujNjrg6Eu3gSwM2BFP0nabm1gD\n9wIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"name":"PingCAP"}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.insight.json000066400000000000000000000126251505422223000240650ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"Xqo+NONxMK6n+UgyjQi4Yk941+DigbT41W2f4xhSn2gDfcXPUqFKr8iCB6ZPSAW/pqUviqI+zI0n+e6lGMqalgHnEReZ+ZVIhO9UDFyaCgvuF92wu7cqFUfd/VoZpg1sPF4rBtaeVQW9EM1hQqBhpxAXcH6L7SSX3DYHpfA/xgHNF3mDKYvNDMP+q18Ei/E3+Ilt5Xm8324Sdt4dADp3Zyqv6i9jdLhpip1iyaQayt3P643biFeJ6xJgzv/xpEAiVve68P7WdPsBkpb5+N3wWAqgG/M8qOu7+oorLRgT4mfqXu3DuYfsRZrz4KenwEnfcwjkorE+HVfQnHDK3Dk/DQ=="}],"signed":{"_type":"component","description":"TiDB-Insight 
collector","expires":"2031-10-21T08:19:55+08:00","id":"insight","name":"insight","platforms":{"linux/amd64":{"v0.2.5-10-g3cea4b8":{"dependencies":null,"entry":"insight","hashes":{"sha256":"6ef9cb310e59627b95bf6ff956559307a4a037d39608c22d49e9d9c5ed74b052","sha512":"a740278d1877c1b02f107ee4dcdad98d8387b1274c942b41cb22742085e7d1042053706594c7ea273eb5b3bff92d44ad43b60cd801212a0f65b620a8b6732555"},"length":1143930,"released":"2020-04-16T13:27:20+08:00","url":"/insight-v0.2.5-10-g3cea4b8-linux-amd64.tar.gz","yanked":false},"v0.2.5-11-g58846fc":{"dependencies":null,"entry":"insight","hashes":{"sha256":"6547dfaa3cbfb61587ff5f057448d617784205534f333a6b7af6c9c37705e00e","sha512":"bc154793ba34939e7f19afe2455a00884a2591354789fa303ff16ed8ad4faff39a8c407504364cd99c8c642b6e096d139fd784d40baa0053025cffbb9a3cabca"},"length":1144752,"released":"2020-04-16T17:58:21+08:00","url":"/insight-v0.2.5-11-g58846fc-linux-amd64.tar.gz","yanked":false},"v0.2.5-11-gdb23d7e":{"dependencies":null,"entry":"insight","hashes":{"sha256":"5614a79ece40666607ff99cbddd2e1960dc253f67706562b51aea1d5fa7098ba","sha512":"4a4687d6d60329232e1814990699f202c7c37dc3ce844d6a4223d3d439dc4b52bbaf1d2a97431b94b50bb77a5a4c0a923b71e84cfd82be6ef07f8cf2370c53e4"},"length":1144256,"released":"2020-04-16T16:03:58+08:00","url":"/insight-v0.2.5-11-gdb23d7e-linux-amd64.tar.gz","yanked":false},"v0.2.5-12-gd146786":{"dependencies":null,"entry":"insight","hashes":{"sha256":"780edfb4cc1161c45f0408e7511a4c4e468b0e9c15e1d2bea2ba74042c8a3c1e","sha512":"719f858aea419bd7a30318c73c6a16eab7c7dc86fabf915672a09161d1fe6b5b7ec89c1cf201eafd69bf6fafaea4c5706673f0732b77a793d35403cf5d21947b"},"length":1576139,"released":"2020-04-20T12:42:28+08:00","url":"/insight-v0.2.5-12-gd146786-linux-amd64.tar.gz","yanked":false},"v0.2.5-12-ged951ef":{"dependencies":null,"entry":"insight","hashes":{"sha256":"cc978c61fe660bd17d46c8ba47560da6bb36c7587c14e56d3512cf28b69dae1b","sha512":"716567c5a5f58aec6330bbbe717215f8e215cc0b3e5ad8377ccedc29554aef478025e9754c7deb6
4da840ff7d493fe0842426eb1d9eeda3fdaaabdaa7135cdaa"},"length":1576083,"released":"2020-04-20T13:25:41+08:00","url":"/insight-v0.2.5-12-ged951ef-linux-amd64.tar.gz","yanked":false},"v0.2.5-13-g51b3cf9":{"dependencies":null,"entry":"insight","hashes":{"sha256":"58a0629830d7ba25816ad60f0856e06e95c7bceff11ce2ac0f22e310725f6eea","sha512":"26673c9a1587b8223b9041e91f6db5073859afd694ab0e59c819ede2e35c40862b020b6fc4d95f027ec89a454c3a3aed74056d434cdb10e0e343275e044374aa"},"length":1576078,"released":"2020-04-20T14:23:53+08:00","url":"/insight-v0.2.5-13-g51b3cf9-linux-amd64.tar.gz","yanked":false},"v0.2.5-9-g0456a16":{"dependencies":null,"entry":"insight","hashes":{"sha256":"69c064e0b2d615abeb6fdce4b97ba9893c7f79cc619f7b0f384e20b03c33788e","sha512":"14c952b94fda1f8b4ccedb720230c8f393c7bf57cb92a6050e2f71a3261e90eae3eccc4f3ed4e5fc3ae8d5942614bffdef50714070a23120b43051a255993625"},"length":1143924,"released":"2020-04-15T19:00:23+08:00","url":"/insight-v0.2.5-9-g0456a16-linux-amd64.tar.gz","yanked":false},"v0.3.0":{"dependencies":null,"entry":"insight","hashes":{"sha256":"bb40136d88da50f95cd0402684001635164b01edf338310d0150c2e1dc7c3b33","sha512":"e21fe1193483a93bba7c8dc05505b5b77ed7e288320fd85cf0983ec9c184a588383c68ea8497bd3b4cbe735886a39186e382c30cbca86bc406510eb3a5d4acd4"},"length":1576462,"released":"2020-04-22T15:17:54+08:00","url":"/insight-v0.3.0-linux-amd64.tar.gz","yanked":false},"v0.3.0-2":{"dependencies":null,"entry":"insight","hashes":{"sha256":"9e6b26b0ee4e7122d084d72b455ed6d1f0b6ef727523152b63e6ac062346792f","sha512":"8039d235f1466bbe7ea9f3ecd7a58fbdad1bdd8e2c442337674319b20eef2ce89a2eee195a93c2f0951028d2de84b9a4fb067a13b6f3770187eddb5ab53939b1"},"length":1576550,"released":"2020-04-21T14:23:21+08:00","url":"/insight-v0.3.0-2-linux-amd64.tar.gz","yanked":false},"v0.3.0-3":{"dependencies":null,"entry":"insight","hashes":{"sha256":"cb6ac0432c9326624b2d43018c87865f54a325a3298a33908b9bf69e93e6247d","sha512":"5b2b9e85417d82fd3af16b0d49539c503985ac683bc8df042e63dd0c929904c39
72bdd1bbb5302db6d59fd283a4425733874adbe85645cfe748b815821bb8fcf"},"length":3789407,"released":"2020-04-28T22:29:41+08:00","url":"/insight-v0.3.0-3-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v0.3.0":{"dependencies":null,"entry":"insight","hashes":{"sha256":"8130163ef6bb9a9c7054bf9d24db83708889f87541fb03a7859b91d65cb77519","sha512":"92fbeb9e0eb0dc63117c39b14172ba07ee3ba4cd3f255802e838fd275b8edd9e612b253f8c92f66cb448c105b8dd4b9d5e75c2fa8e207a0afa2d211dded6865b"},"length":2967211,"released":"2020-04-22T15:17:54+08:00","url":"/insight-v0.3.0-linux-arm64.tar.gz","yanked":false},"v0.3.0-3":{"dependencies":null,"entry":"insight","hashes":{"sha256":"5bd731a10d069a240ffbf1c3825fe24f1e331d492fe8d93d78d9133a60de98dd","sha512":"1b6677a432d599c5c38402749652313343d3edf80d00b3a09d9185c8c7bad77d490e82e52ccf49a0599f42eaa217b71c26b171c9434a2837c710d1fca883466a"},"length":2975094,"released":"2020-04-28T22:29:41+08:00","url":"/insight-v0.3.0-3-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.mirrors.json000066400000000000000000000043321505422223000241110ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"YyzWGVN3vrx4aIhdqpY8igNHufKhSwV46/DXVzlSMKo9Lgj5ZN4H0cAZiglHb443I8sR6CiyY/z6n4yLGjaUt4h7hOuUVvpP717yyc007C+oXEAO8kzrll+6MJDbk68NySenRd9Slsiq8UOyEfr9w6hFbiJ7iA7iJF03kRC5JjS6TQr6eqrrH7VIsYgekcfAhf6Yf3p5GxQveYaoavdNL9OgU2cFQ6+VFlL6e+LZeo3lAT1GfE7uPqYAdkQygbcZt+C5zPXvia18I0Zg2vWMavg26abgYl1pIGNd+owlhUxbClZg7k/AqANUrIZknF1Wpz0nRgKU/i4vWlUTrx3bhw=="}],"signed":{"_type":"component","description":"Build a local mirrors and download all selected 
components","expires":"2031-10-21T08:19:55+08:00","id":"mirrors","name":"mirrors","platforms":{"darwin/amd64":{"v0.0.1":{"dependencies":null,"entry":"mirrors","hashes":{"sha256":"49c576e919a720fb3f6034504f0c4f0168c0c3e02f0e781eeb25ed9a8f6e4281","sha512":"331740f9c928ac7c98f08c7db66e2d9b4f58e7f929a68adb29a4fa8560a86088cbe5b4bb46c9a92afe4e921f28e85205588726ed417d4f12102cdacea5570c1f"},"length":5522493,"released":"2020-04-15T22:13:50+08:00","url":"/mirrors-v0.0.1-darwin-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"mirrors","hashes":{"sha256":"abc955fb2316c8208ef2982895845422d413e0a23580e20724b36fa54da30282","sha512":"b69df60ebf550c781c2987c612e5972b9933ee07dc6dd2d44c3e99cfcc4bf5ea07fa4d97dcdc4fb9fe94e62ab3995c460d587c9dceaaf7894ed48d2f8132ef4a"},"length":5435571,"released":"2020-04-23T11:27:18+08:00","url":"/mirrors-v0.0.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.0.1":{"dependencies":null,"entry":"mirrors","hashes":{"sha256":"e20cac9a226a9183de95c590f9e26f53a5e0a95249a38bd7ceec96a06f61a5e1","sha512":"bf1e47b5a81f303efcde52bc5c13a239eabb70ace83fc2fd1ee97556fc7188a1488e49fa9719973f2f93b48a0b63bd58330a92358dc0181af3bcfe4c555962a6"},"length":5522493,"released":"2020-04-15T22:13:50+08:00","url":"/mirrors-v0.0.1-linux-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"mirrors","hashes":{"sha256":"94525cc1d27a6ecf4978c58f97b53e560cc7e5045bdd1fcfc8d11562db04a64d","sha512":"205c5ee3d9d33ced3588ebf3e2f7afed1e85bf7f2a805960c02072cfc500e63b30dbce6c833f410eb04b493a0ddfef722a1c2cbef30f3dab1a74b7e6ba8501c4"},"length":5525082,"released":"2020-04-23T11:27:18+08:00","url":"/mirrors-v0.0.2-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.node_exporter.json000066400000000000000000000036451505422223000252770ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"rK3cGvP9nFOPzlIK0WnhPXqraJiE/
zWCGmeWQcucq0iLcxOMKQGZ++b9ke+23HiFQ2ERqrnNec2IY2nm90Dx1LtBXpkJuAUpOZTbYfW0VDN4TW2txkNZRYCQL2mmmRoLS6kH4VHrPsIz5U/XY2rPHi8Vmx5yuwEWYMF1eOS40QQ+8Mixe19n9CxsAqItnFNLueZQETeigmJFtX+HZ/jS+vzUYwJdSrACmuyxDpkDv7tkO3PMwT+p8N65tMGf9sOHinxfTI0GXCAbrJro2ViRNLd5fJhUqTV7W4jGFKb3qQ7TWS8uMrLcJy4pB68P7ET70IBsf7PhRd7SoO/9x9Lxag=="}],"signed":{"_type":"component","description":"Exporter for machine metrics","expires":"2031-10-21T08:19:55+08:00","id":"node_exporter","name":"node_exporter","platforms":{"darwin/amd64":{"v0.17.0":{"dependencies":null,"entry":"node_exporter/node_exporter","hashes":{"sha256":"8ce9f492af99e2dd24ed0241b744e34d63a5199fb0e9ce5c595933b8287eeebe","sha512":"217a099745cfaea595b819d4a7252062e0a2858450a0a8718f3e9d621aadfae6c05a3148724f345fdfc3d7a4bcd05fc75b6f62e176fb6f97f238555f485b0fd2"},"length":4178999,"released":"2020-05-20T04:45:46+08:00","url":"/node_exporter-v0.17.0-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.17.0":{"dependencies":null,"entry":"node_exporter/node_exporter","hashes":{"sha256":"b6fbba3c7d62746e5476db55c68ea5f399520a0439cc3f9c6bbf939df6aa5519","sha512":"31285e819d56107dc0635954da3fe108f7d5455727cc8d83da4421920df56575b980f4ec8bfaa6e08a379e59699cfba9464b1f7165a9425aac8ea51f4be04f94"},"length":7018078,"released":"2020-05-20T04:45:46+08:00","url":"/node_exporter-v0.17.0-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v0.17.0":{"dependencies":null,"entry":"node_exporter/node_exporter","hashes":{"sha256":"3267d314945869b26bcfafb042c8ae9a002e8c7870fd8182340a758fe3522b58","sha512":"54a9d43eb411b38d9b9dc688226f74c9e041ae2f0ad3737c81029c940034a9c249685f306fef73600b5e6c7ff99b9fe7f698a2641aedf0afafe7753f5e8b5fb1"},"length":6498130,"released":"2020-05-20T04:45:46+08:00","url":"/node_exporter-v0.17.0-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.package.json000066400000000000000000000073131505422223000240110ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9
f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"gRu70BEPZkS//NNNNZkBZ5Uutly/OGrsq8ADtaamZml85TlzFBtG3gM9G8nIuqrk+3a5BK+CFPWXeb5TNGEUhRxfNp9Rngi5PowpomDN5yFYUZxcTpgp3yaG7ecGjJy60QDLWgAor6iqiCAPpwPivR8Cfjlrq/ChmMSoQl96nXSbYSXGGdxO/EvPP5P+B+B5E7YQ2mnNBfb6wkTrRyMcQ/s6S7q8mgzey/zL9M1TfkOlPFRBQBJQOykbeXldaNsmxaB9fzKkAsjUH54N9ECtjnB20K/jL+4oTHFP8GrPXJRfqqb7TnEAtigzc/ygcUWrwO4kWte2OazAI3vxpKMbhQ=="}],"signed":{"_type":"component","description":"A toolbox to package tiup component","expires":"2031-10-21T08:19:55+08:00","id":"package","name":"package","platforms":{"darwin/amd64":{"v0.0.6":{"dependencies":null,"entry":"package","hashes":{"sha256":"d10bb3285de66a55c22c1700b47450507a1302d1996e5f42a7321cf0f08ad154","sha512":"f79a6c10a57b7af791f8ec4b50e706225af0fb09f868f49240031de3fa3b662ab3d536cdc7926d1b2f2965ddc291e4fdc4633fabc690687400c1b8538eab940c"},"length":5533317,"released":"2020-03-10T12:00:00+08:00","url":"/package-v0.0.6-darwin-amd64.tar.gz","yanked":false},"v0.0.7":{"dependencies":null,"entry":"pack","hashes":{"sha256":"9f028bccbde290f180454abd81b9c8e20db1f8be50d53dc95db4ad963ea0944b","sha512":"fa94e2337f55b746259bfa67c93a97630fffb9abd9a0226d2b928d1455fc1559f90a641e6127d4d7ad29e5d5bc8c00856f39dcb5b15a7102bc6d6cc8948e6bba"},"length":5657090,"released":"2020-04-03T16:48:39+08:00","url":"/package-v0.0.7-darwin-amd64.tar.gz","yanked":false},"v0.0.8":{"dependencies":null,"entry":"pack","hashes":{"sha256":"95f3a5a11a90efa8df3d1e3f31c6d85ddc9b6498e3ec231c052d19f470472a4d","sha512":"7114cbaee972609a7d7a5d167669ff4e0b8d47ccdfc01616c128972ee90d25910b496c08d05cd7bfb39b16235a420338dc22a7cc9c868f44665b9d29752efbc3"},"length":5659221,"released":"2020-04-08T21:27:07+08:00","url":"/package-v0.0.8-darwin-amd64.tar.gz","yanked":false},"v0.0.9":{"dependencies":null,"entry":"pack","hashes":{"sha256":"2647ff0770eb62d86964eef464889e913d7316689b7f5d5b3a9b9d8a53de179d","sha512":"447c0c1a312f9c512e4ea0e2e8c4a08ff826608f5e4b9c7797206c0534745dc49e27b7121336bda43b181c28896788c
a543cc68e6515358ed37a1c480f65ed17"},"length":5667243,"released":"2020-04-18T13:23:20+08:00","url":"/package-v0.0.9-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.0.6":{"dependencies":null,"entry":"package","hashes":{"sha256":"c18f50ece2c94936cd0555894d5600ee8d022a6ae59abe5a56b3b455bdb44492","sha512":"fef67440967b5889671df811527cf8a45cb3a77daad86257e78188b558671327d2474d4ec57473ba444cd77e996fda9cd2edae9de6daa71297f069a387755dae"},"length":5582714,"released":"2020-03-10T12:00:00+08:00","url":"/package-v0.0.6-linux-amd64.tar.gz","yanked":false},"v0.0.7":{"dependencies":null,"entry":"pack","hashes":{"sha256":"c4c96ccdb205651d2d0ceadb47a9c5dcc51cfc627ba79e658e9ec0d2f1e77196","sha512":"85cd7e8a17be2d2e124903d7add10c220d5caedeeee1e0ec426307450a4307e2a94b1151f89490e90fd3d10e979e33cdafe7117c1e162ea83f60357a64d9f1a9"},"length":5810345,"released":"2020-04-03T16:48:39+08:00","url":"/package-v0.0.7-linux-amd64.tar.gz","yanked":false},"v0.0.8":{"dependencies":null,"entry":"pack","hashes":{"sha256":"79f7e694170075363fa47809bcdc5ad6184455dd2c9cbf42302d6f4ce0e8794d","sha512":"4d2cde00379fa8ece72e2d238e7b4876cd6ccd0f6dabf3673ea1fb4a06e57ffeba11be6fb9f4c8369a1da4f1d8d114cb1b9ddbece6bb29a8a1dbf48007c984e5"},"length":5811663,"released":"2020-04-08T21:27:07+08:00","url":"/package-v0.0.8-linux-amd64.tar.gz","yanked":false},"v0.0.9":{"dependencies":null,"entry":"pack","hashes":{"sha256":"bafcc74b99c72510e1e5638ec2b7b7883df4873ec741dad62ecf59db8c4dd047","sha512":"b57b581d07ac0b75e7648e86f080d62c49457ef1f3455bed90c24032ba1b68f83da075466dcb565d69f3ab130db5be71a72e722565a276b1411c543b3bc8e955"},"length":5819867,"released":"2020-04-18T13:23:20+08:00","url":"/package-v0.0.9-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.pd.json000066400000000000000000000611741505422223000230260ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"Ud1K+V0sed
aoqBng61zWzqdhTWpny5c25OeVZgSdu5oc3/hfovo57ZxrmAYduKJLWecVoG5HUsNFpPVhMgXGabALjXPE0r0lJa8QK3fHLTIUh7DGM0OiP8Z5zgpOh1vF9ijR0g7ySDi2+AJpBG7PFHKOXaAo/UjVx99J2LUobU0O3qyBFEhb/aSIRr4tj94kFd7Ylh0+Tltqcgqtsuas8d9Fj16gSZ74ixuo4/tJrk+LbilBnRWrrJ45LAZMjPpsmu2NBGb4HY8A5ysEM8bZiPj4jpbXSwsMH8zsedTls76BrGjcDW5WkheHUcEH3i/EoIruK67PiPjWLO+r68qvdw=="}],"signed":{"_type":"component","description":"PD is the abbreviation for Placement Driver. It is used to manage and schedule the TiKV cluster","expires":"2031-10-21T08:19:55+08:00","id":"pd","name":"pd","platforms":{"darwin/amd64":{"master":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"07126ccbf77107c3bdb8cfdcc7d1a6e0c50c3a534d7176f99b311dbfedef445c","sha512":"b4f2d0c53eeb1d531d90d56bbdc1683d1ff912c644a1ceecde48b48f3e5f0726525546bb8175822f20714a556dbe83a281d3da601d299041a46ca1b08c55eeb0"},"length":28940751,"released":"2020-03-18T08:39:30.648812457+08:00","url":"/pd-master-darwin-amd64.tar.gz","yanked":false},"v3.0":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"946e89c06488477238fbac128969635bc8bc3a67ba9f192bf90b6069db0a8659","sha512":"2e04ff32c179aa48153c38d55eec9c72e4e9814296d0490491d7217ec2341a3ba0be2349c524a929d4cc8d7790da46e28efb842547391fa06eb7f20382e501e7"},"length":16042623,"released":"2020-04-16T16:58:31+08:00","url":"/pd-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"a0ca8f8a2bfbe9360d976ee4c2b5f83d6cb18b65085447ede53fbb4c17275525","sha512":"45009525d6da05bcdbd1f636894c4b06a86ba9f40dbc7b84d33812497af5c4816330a63b934007d09473a78d9943957d7eac1bbc41586377576c5290230e0565"},"length":16412650,"released":"2020-04-16T14:04:07+08:00","url":"/pd-v3.0.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"e688fc5dfcbad5ff23756ce3da36923a68d2c1b8909466e4fc5cbddcaec92698","sha512":"b429ebcfefd0870c472404ffae06bed8161da7dafc689f8d650b4883db726622bf1054faf15b098a6f5b80a93f4da7b25cc333e3882e230f
49d423f4bfff1a3f"},"length":16431622,"released":"2020-04-27T19:38:48+08:00","url":"/pd-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"e025ad430c35d008ce701d15750b634eb1cb8e6a52f284df1738e2f4efe90907","sha512":"1f3907e7d60635a81811b35976b872673e17a2f05b370f27fd2ed670e6c8944891a2ff7b913a00d2c6beef0a74035e9d470cd4f54d8826a4c1bc65cbeef163bf"},"length":16308753,"released":"2020-03-13T14:12:17.469446048+08:00","url":"/pd-v3.0.10-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"fb6a8995806951e188048503656c1bd3df2b73e8a8799ff5d9006da894d5e7ba","sha512":"74ab7c9cb773e5570382416bb08659a57aea75d59062708fe19d828a9c8ca8a9fb32c15915f240f86c1efdd0a7b38be6f6702f714f6eccc9b7ba2c412b5b88ff"},"length":15898603,"released":"2020-04-17T01:09:36+08:00","url":"/pd-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"9cc3e666a102f74f5d87d47061d413707e147166fe6fb15fc8004aa00217717f","sha512":"bf8f3a89755e31e4309dce1338ef5c61ec6f6cb02d75e24335bc5aebdc30cd5f49d931e33af85d1bfbbde827b764c74fd302fbb266d1c4cd68ac6c5e37c07d40"},"length":15899260,"released":"2020-04-17T01:16:21+08:00","url":"/pd-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"13be822b879385236cb8b2a865d759633d77183c761e455486804a3de0988d2d","sha512":"c3666812f005f9130c860df27d8d88b5da871d0bab143bb66ce6b48a57b5d7ffc7b0399ef2a5528bf1af192bada9da4f2149ff2f975d89ae9c4ebc9bc42005c4"},"length":15899255,"released":"2020-04-26T17:25:32+08:00","url":"/pd-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"e7f3de02a71c85c2b35b00c60af769b4d5f167bfec7a5820d196b2cb44c4ff8e","sha512":"1b252a635e018eda13bd8761986a8f9421938e12a16d076cf00618ba3c1c0b2375ee94fcd9fa572d63ad1b4e483f11b7d62938989df19a534b45998dbf4eab96"},"length":15899249
,"released":"2020-05-09T21:12:00+08:00","url":"/pd-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"866835f6fd87a481e96eb2b913a722b19a7bbbad4b63ddeb63f85b77ce73138b","sha512":"1936923714e49e86d86ac8c26bfbe6c236b2e4cd32bcba1345f789d05449770217ae73c5f208a40450876578658583babcbb6ca71506ea27a92ca982c266b157"},"length":16435546,"released":"2020-04-16T23:55:27+08:00","url":"/pd-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"9eb79498331baf4f5d5d1ce8a84ac35e2da248ab4fce51591e7d6f093207cad4","sha512":"673d2ff20a5673dd96006c91b60397540eef876412ef3e4402a8c7f9583c18da9e57eba9802f759e0891ddd797201c8b2137bcb0c63d40ce34df1add1c5dabce"},"length":16438372,"released":"2020-04-17T00:16:47+08:00","url":"/pd-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"3f3c8eabaceee19306e0850fed6811f3721fed56f44338f7ca2678a564119854","sha512":"41c40103d915b6329d30d6241083c27e2acd7f48c0f0dec0ed312fd4628ff8c56a2ded09355cb5100a39dd19b37970e2bde3778617f5bde25a6988bde785fb14"},"length":16439703,"released":"2020-04-17T00:23:01+08:00","url":"/pd-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"59350f3540af9bc91b8dbff2a96bf4854a2c73da8abd69d69127ec858ab7d55f","sha512":"4866f9efb24f0e73af62849aecd7c48d4e90d9fd292a8e7a9f18bc67b9eef9ee34e1b93b1203ced0d8183bfca749931eac642abc4c345f4c5b624bfe36d45b71"},"length":16444934,"released":"2020-04-17T00:30:01+08:00","url":"/pd-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"0dc9a01f52ff80f8712e072c14d0f135bedef6f453efa3c2d36e69bdb1977fbe","sha512":"ea3c5275c8f3d03c7a61321f23bc3adaf40d1cb4ec1396ec079710788e9618571868621e0c49880a330b2a69ca28b344cf5f954e635e55b04f272dbcf7e22ebd"},"length":16467065,"released":"2020-04-17T00:39:49+08:00","url":"/pd-v3.
0.6-darwin-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"8f5104b2d3756332407aeb704e998a3e3e89fc1d8208af912bf9505b19b0e4af","sha512":"69e3e13f0fe5a8e5c3f6ca0adefcae364c1c6e46b701759b4ab389f73a78b5a6362173d7a5711bccd51836c4fc0604b4e957811c8022ac473c61a69466d55d72"},"length":16466994,"released":"2020-04-17T00:46:48+08:00","url":"/pd-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"14a52a6af3e8e6c1eefef69f420a3924b4c67a4f751041d70b11962d597d6aac","sha512":"0cc8aea1a34c7f22a6bb0d2d79f9a0d75ed1a6c2e80fbed61ebefb1572e353dd826d9fe409f97366dd669fca6604175baa2e76665c2875c759efa39e0695a0cd"},"length":16299511,"released":"2020-04-17T00:54:38+08:00","url":"/pd-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"b08347ffae1dc82301940fa7576df8708177c995da1831bf9b8a9ed0d7668cf7","sha512":"7f2ee9ca7c92acb16b56cf2b4471384f154990f1172c164f81d339f330f4d81daa14c9052886324c156d8c76959d492b9345657ffcb3c727bc6c85125cb4478e"},"length":16300361,"released":"2020-04-17T01:01:15+08:00","url":"/pd-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"fa0ab38716c99662218abb2292f31734ec0e508ab76860dd54fc0f683f60613a","sha512":"6d586315ac8076e03f0b18bcf309cdcbcf50eeb4f7fba6623a4b1ec345d78db0015d8dea4703622e1b0187403970ee0fad563ad11189ab65075957aefe60b353"},"length":26677656,"released":"2020-04-17T11:09:46+08:00","url":"/pd-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"d27c7f93bf57f171232c1319034db1cf51e418ee7e62c50a7c1c3ea327d3d914","sha512":"4f5107cd6262e2ed32f7ee80489a87ee383bebfa7c94fbaab2a8b1cacfe1dc9d5d3d439f24722723db6568909599996e944438b676f097679a36ed7295fd3a71"},"length":16794709,"released":"2020-04-13T16:08:27+08:00","url":"/pd-v3.1.0-beta-darwin-amd64.tar.gz","yanked":false},"v3.
1.0-beta.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"5a40be294da31f08fa1f93ae60e3d0c9777e70dc340b5982f17b7f88b3aff284","sha512":"724b551329ec5131c703cf091b32da6f4fc2f299f44ba0777f34ad864276f11684bd7cd4f7205256f899cd4bb9ee0845397d7ae0a02a8c3982c1f661cb01780f"},"length":16311970,"released":"2020-04-13T15:45:55+08:00","url":"/pd-v3.1.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"ef5970dd039e2540eec1a25972bbbfda7ad18ce831346641b2e408dd52e9a229","sha512":"5b971f0a5f780fe944d64799e3837fecd6e7aba14e31c1645b8b2d56faf2a24d49dbd14bd673e70b5652002d3d9e353ed812ad4c7f05cd5fda6b2ae09fc72d29"},"length":26621069,"released":"2020-03-19T00:49:03.948679159+08:00","url":"/pd-v3.1.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"617f10be9a490dc6e581396c8cf76551b540b2cbe665d3b9b0882ab2e5a1557f","sha512":"3d8ddb145810538585202a3070f5db14b4e87d6abb1dd55efc60bab82fc9d1ecda5139e594ab92c67a96c2dc73a030340d771424ab6182b9a2d5282a87fb24a3"},"length":26648424,"released":"2020-04-02T23:43:40.334838645+08:00","url":"/pd-v3.1.0-rc-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"02e97c11e7e2617d54f21bd3afa6a9d502b8c9d5a339d91223c3df1b2df414c7","sha512":"211b07261b99f9c246ac9dd51311c353c0d2f19b91d72237cbc726e95584b95086f6e35de21453e33bf4a7d10fb3b6120cbd3d3ad9574c61cb1f190abb4a07a6"},"length":26692327,"released":"2020-04-30T21:02:47+08:00","url":"/pd-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"191d4942b61ccfcd67e47f07060dd7a304a0edbf1752b464242260b3eff99615","sha512":"09b6a53359ceda6ae47c67f4201c7d62e4c9b8be45294d62fe2150779e80636f24ebe08cbda5c0bf8aa8f0be13502f42e006d09a2722d3a94aebd2bdeaea865b"},"length":17266658,"released":"2020-03-13T12:44:10.728852547+08:00","url":"/pd-v4.0.0-beta-darwin-amd64.tar.gz","
yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"ff65ac57c838e717669be0938b3b5d4030f20941eb1aadacccdfb68f78e5eb7f","sha512":"f0c28b17641c715f9c21d87b039803b20ea86b158e70c42b732374dd7035a94b07d4fc539f2ed4522b207eb733a8089e7ef8af2f81e287a2459dfbb9907d570d"},"length":27289578,"released":"2020-03-13T12:30:23.571584675+08:00","url":"/pd-v4.0.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"91e60c64986ed8f2e7f49425e933821e7b8bc781093f3bd2b2cf97a5de1a6c15","sha512":"615ca6d2a1770d31c861a0d7a2ebeffe9e440edd986b2adac872fd90654de9febb702f76069e6978a38444ff35abfee4fe570a2757d33cb360738ff1b9d47b85"},"length":28954199,"released":"2020-03-18T22:52:16.949320853+08:00","url":"/pd-v4.0.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"5ee2bc87a6cc8b7a31f8ab64093c5917a64f90e795fda9c3ec5656fe8e74a390","sha512":"b6979f4224459ed71ad1a492a68e145eb140f651114a096250a80a228def108675302c36760f2278088207280cd2606590cc109b6ac5c97e658b1ef2c9a17ae8"},"length":33997942,"released":"2020-04-17T01:22:22+08:00","url":"/pd-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"55a50bef462eb4223f88d06fe17c9c73d1bfd6ea59597e632efb3f584c2febfb","sha512":"1e49a9375be68d62f55f532a39a924033f2145e5ea4d18b4cf482c3b38ebdf1186cf2a8cd40a514ba1c71cdb6e278175c3e7a60b5d8d3ce42851263c36dcaf9a"},"length":39720830,"released":"2020-04-29T01:03:52+08:00","url":"/pd-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"3dfe77da303c9f52538188690af4638b153e8075342d0d38cc58ad8ec2336344","sha512":"f0138fd8d62275c85a6ca6e29bc9ae637409bdeaa67faf756cb8b2568a817ba75f874736a2753de1fd00aaf2576ecca17f7da95155cfbf1395896238bb8c4bcb"},"length":39952529,"released":"2020-05-15T21:55:08+08:00","url":"/pd-v4.0.0-rc.2-dar
win-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"a645849091082e2362caaca56709b606059e8a45396668b6317bd87f8a555b81","sha512":"6b4800bf7cb9bc50eaf9d5697236dedb14e4d64e31d7aec1e013c822aaaa293fe7a92af5fd6b6823aca2553dfa634983779901fdb2851ea892943b4e199ca7c0"},"length":16507938,"released":"2020-04-16T16:58:31+08:00","url":"/pd-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"fbf5c5a8b0a001738380113b58cad9e40c14747f606d1e23458b2740a9afbcda","sha512":"64156cfc8fc1b19e54ae41e8f44f6db7698a949f011c7f02f388c9fa307c8e0d2979c7716ca301d050608e7c08359584e4345991c330bf66e98631d7923760bb"},"length":16462432,"released":"2020-04-16T14:04:07+08:00","url":"/pd-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"1fd4aebd3aacf297cb1320db611760ee2222c5f06500ef8eceb69efd0eac5a6b","sha512":"5cc51a088996cf15d134f99e11a2fabf2f69548146c5c487dad5893a33e3a8e6aa689d0871c98fc1353c0ef4c0de34b19af209da0e4295cb56a53f01cb28cc63"},"length":16474921,"released":"2020-04-27T19:38:48+08:00","url":"/pd-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"71d395235aa7a3b2df2cabd431bbbf0d12c12808b6b77f8a153a973a911110a9","sha512":"6653442443502bd9e0751efa90bd352fe30a11b0633e30e8d4141d8096feb73a56fb0036253f797801f82ed320cbcd2589bd090ec739dd4b83307178261dc5a8"},"length":16101474,"released":"2020-03-13T14:12:17.469446048+08:00","url":"/pd-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"34a986ff21aef47563ceda7bdb59ba01f68eb79fd0923ca31b721455dc141929","sha512":"f168da7406e71b6f7ab693bbee4c71ebf5e0a51fbdebb8f335f48811d39ad0b5f9837157f3e41176e9e2018db602df0417ad94304f8215001966a2a052ec1d1d"},"length":16102892,"released":"2020-04-17T01:09:36+08:00","url":"/pd-v3.0.11-linux-amd64.tar.gz","yanked":false},
"v3.0.12":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"9ba7e220d5bd35258d0f2631655116bee8a954290cf9a4ae23143311b1d24c2b","sha512":"0f65180b33e1de916dd2ecafa9368103b6f9b9b34b7a9e5335219479475e65d24310534e4cd106a04c22bad84c12f04d39a3ae15ebf08abf9234ebf208046914"},"length":16102390,"released":"2020-04-17T01:16:21+08:00","url":"/pd-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"2a9377ad7568bf9462e643401e4cd9abce0c4429862d67132481b5abd415621a","sha512":"c154d16ff0072483ddce63ca27fddced8bc162723231b1a482b0ee3e97ec9ccb21b80b9a273c6b641c8d1e0ca0bc1e912479a5ba3914120fd1a2e5292b6c57c5"},"length":16102335,"released":"2020-04-26T17:25:32+08:00","url":"/pd-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"b9565c9c92101397b3eafab4f7340767a5e34a11fa10cfb338f3d73da1e4c7e9","sha512":"74e49c9eb75468e41e2e8a45968eccc919288b4c4e1639dc6eb357f16ef9c825dc1ebf69614eef897a9feb91c182b840548fe4b1bd40d21815ce2e04ebf5d0be"},"length":16102386,"released":"2020-05-09T21:12:00+08:00","url":"/pd-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"cc4be2445777ff74500c8fc23a23506abc4f9ed40d422e994a6d3b60ac6533f3","sha512":"a8f9fed9f1844e132b5b91fa870455456e56c60e3edd5acbbea929f57dbc2cc07116abc6d74f109a6a4cb52eef959a49df8872f6f6ac23d1b69c8a7cf2c9a142"},"length":16484016,"released":"2020-04-16T23:55:27+08:00","url":"/pd-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"4c72f9b68922db5343d2d63eae67afe436fe130b46dcd86422b4cd6ae7c63de2","sha512":"39099424efdde84687692c0c6ce04d847f66bbd9c77628336c9d6eebfc7671b59a4fba4db96bfdb47a4a84c2a2208de48de77782d1657bb9eae2e669d3488f6c"},"length":16483176,"released":"2020-04-17T00:16:47+08:00","url":"/pd-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"pd-server","has
hes":{"sha256":"5a8596347079688a9bc5e16a1af954ede9a4916c981872bad4bb694faac2ecc7","sha512":"066bc590d0a041a0044a424227fe2059e455d7db68996d90ae6dd1457cd080aa9a28753a5f7db58c5158dfda7bf56fe888fa27b2f005c2a2cf74536711e1fd2f"},"length":16488829,"released":"2020-04-17T00:23:01+08:00","url":"/pd-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"9d3cee82f55c8186aacb7f3c476f0544191e7671e3601696309d52f6a2906be5","sha512":"72ddb27afd91d5e1778bc902cc4ca43fb9f254b0c88c8fb10796df5c2a92a40e5199b2f09a1095d10f6e2c3fb3bb6f8bc1bb5d0368002783408d911746cdd289"},"length":16498657,"released":"2020-04-17T00:30:01+08:00","url":"/pd-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"fc45012bc3b081493a15c2d4eee3ea2c7aa895b53fc48ce3fa906ef4d0b20bb2","sha512":"32f263dcb6be0bb91b15f4ffc9e1fba98ec40cb78f291c1d47c7e56fccee0c03ba845f25fd86a8d437d1d03c583649587c086775152a4249f15b8c55b88f5904"},"length":16510837,"released":"2020-04-17T00:39:49+08:00","url":"/pd-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"a0af23a59fa982a49e80b6fe1a275f6fafa916f466a7435bad111020580af9ee","sha512":"7c0cff79eef95c3b59afe01a94de1fe73f81354964718be396f8667369c0b09a43805981583eff0f2e3cd84c6fcf5c3289cf666c3db18f4b3a4ae1c73931e590"},"length":16507938,"released":"2020-04-17T00:46:48+08:00","url":"/pd-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"c47ebb46f371e211edff7ded04fafe9775cfad2b05b5ddac070d9639e15cf3a9","sha512":"edb4da08479432e6b67f3287046bfe1649812f6ac2ffcb686fa8470dae7e612099d6a8ea10e54a7254a412dad907d46fa83350631655f8603928ea942b87aac2"},"length":16403162,"released":"2020-04-17T00:54:38+08:00","url":"/pd-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"a9a4a84ee4c3d9283248f8d1f786701721fb36d83114
3017251f0d316722f271","sha512":"b47a1e05dd27f3413b0c218958800d8a0ab30dcd67cf9da67032fc7140a2287ef826f295d079106354b161068229efa510b1e36feba2f3672b7b366e6b1082a7"},"length":16404355,"released":"2020-04-17T01:01:15+08:00","url":"/pd-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"719dffe2aa2e3a2d70587ac0ab3bd509f36c19248e4e676e3bae1d0023d09c0d","sha512":"df10391c74f1550798936b761bedd1f8efb0ef57de3c225636949d648437111d557116010f0f9ab91955d2592ac7127ac2256f22894499d5e23cd9039f11ab0b"},"length":25888458,"released":"2020-04-17T11:09:46+08:00","url":"/pd-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"a4e5a3b9d67e3fa7884493d3f987c30d6c410e0834ee3fb7c824251348ba1364","sha512":"ec53a20a803d902d04b3211cabcf8bf8619b8e9234a657b8aabaa43cb23b63396960db80ca8f67a4b3d4f32e27a0ff0d0c81a77ea0be4a0492aabf38255b5a0a"},"length":16851736,"released":"2020-04-13T16:08:27+08:00","url":"/pd-v3.1.0-beta-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"b7780fa6e08ea28797900e1b9563e705405033f1f3cf799b2fa83e5960f0bee1","sha512":"fbd65c3ee60966abf03495fb838262117f7a6570657014069d2e0032e648f82c4f9a480eae2e61b4e70676c96ff0c6cf686923d396c11d08b5362968e52b4c04"},"length":16409973,"released":"2020-04-13T15:45:55+08:00","url":"/pd-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"090a2413e826cec566d32d9602805f6fcada1031ff48688cd45ff0e77f33c940","sha512":"d8c94d992292babea2af86e4c70dfebb378f758fe0c89f6eb79800d971d012054c91d5728545773eca1b5961822a54567038634f37d578fe61b91fa65d16d090"},"length":25812335,"released":"2020-03-19T00:49:03.948679159+08:00","url":"/pd-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"9bb1701939b56c6476bd2c75e77e9b3bfdc4b2465f658a6fc6e57
892f3e58f22","sha512":"0404f4a7f2432df327e6e1a8904deb69bae8cad59469e7476b7313eb1ff869a334aa052bbaa4ff09278cbb3cf24c55859490eaad869854cab276df4672a97946"},"length":25844640,"released":"2020-04-02T23:43:40.334838645+08:00","url":"/pd-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"40d736b03fd2feecbdea799e064bec7a9363f81493960d12af3e955037d52bd8","sha512":"849e70b72d160128d196858c891864c085854e6fbd275460e214958bdabe05587579fa3106a635ff65952071fd79b2ce692366cd5e1a1438c3e23774648ef73d"},"length":25913313,"released":"2020-04-30T21:02:47+08:00","url":"/pd-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"3c269c011704fa0632302ad266e53c08e7f9e2466cb5b4d0d8c66129992c5d81","sha512":"ae8f29981d07864d677c3bc6fa478825f6f879ee5edc9563fa3986e45343553ea3f4455560935263b83b4879f6fa52854c201fea2b11b7aec9d8557e98de0267"},"length":18054979,"released":"2020-03-13T12:44:10.728852547+08:00","url":"/pd-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"3c73107ceca1dfdfb3d9d5a83bc86a0ea7bb08def06313c979c203c0ce4bcf6f","sha512":"138cec65208167774221ccac50294535a90b0fd7a9c9abf142008c3531f045ba09e9b112f4877888cda52ec4b4b012df4397b6ae9563294f42a52e29d1c7c0c6"},"length":26481506,"released":"2020-03-13T12:30:23.571584675+08:00","url":"/pd-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"7da7dd640dd0b8d7e741f7981fe953d5dd9b45a10296d320ac8d88da5b8aa10f","sha512":"55725514872cd78fabb71fc0cf12b37b3de1f66c1277134a055e340f38450010bbf6b3ef90841ec5050e39eb2b415452af2f26e5858f3e3559df94d4e35a3bf9"},"length":28190692,"released":"2020-03-18T22:52:16.949320853+08:00","url":"/pd-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"8b7b4a6221422e2906c9f1fe8e41e
60d4a5264c857ae384da3dbbd28b50b53c2","sha512":"740887ed87d9cb8b4bc747723c28694addec971273bf22e9bc677ecc045aaaa15e390dc07df2fbaf3841a7ff157dd6b6de248dc2333f3cba27b307aa053e4c3e"},"length":33220772,"released":"2020-04-17T01:22:22+08:00","url":"/pd-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"3f43330f130cf448509e1e315b1227830dd38ae43c16de3bc658d16631b5113c","sha512":"1602e1a9146cc46b0fea41932c48c53e724f7b6fbacbb9f51fb5680fd0c61fa84f3d592ee8d49bd97c42dca473cd665c21489d7d361f2741c159f0471e7ce2a1"},"length":38940376,"released":"2020-04-29T01:03:52+08:00","url":"/pd-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"df23662dbd397dc3433e22fea6b6816f7536c946b3bca4848e36e76dfabf660c","sha512":"b0aa9359d58a010ff3c704a2125c152f23f5a442efa6dd63ffc5341a2f1ff9d418cf656d8f16c8580d46143af9ab4206bc9eec7f719ba19778bb7e1303a41470"},"length":39172434,"released":"2020-05-15T21:55:08+08:00","url":"/pd-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"fc5083f87a0f5001e3d89411ad8e6f5c73ad582b0211137d8c509289e60720c7","sha512":"df2f9fafafb511ba9db8dfa051b1fc59f928c4ef4c33cb1d59810d7a4c08ba5f1389c7d37bdb8eed19ccafd2895ddc37112e59e24899f2968c882901e4707d90"},"length":15075162,"released":"2020-04-27T19:38:48+08:00","url":"/pd-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"554f25572796c00bdc7c6e7164ba4ff8d8266b062aef1a7f8a06437b94774511","sha512":"1d639545341f607c11b437805bf799b44693808038e799cebdf44bbf51803e83692e3c775517b73d5f223957b40d9c5652d38bd128e8532648f06e91a4ef2b55"},"length":14982915,"released":"2020-05-09T21:12:00+08:00","url":"/pd-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"6750e3ebd15e984661f40c698889ebaeb3e188b21266a1e7
031c7ca597c6c1da","sha512":"f8bcab93709a0d98b6b8e4a24f5d327fa242058535755a0821b21620c5b65b6f259a57497aa902cf382ab21b4fa8d64683544c6b899a6350c035010b199d285d"},"length":25052043,"released":"2020-04-30T21:02:47+08:00","url":"/pd-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"6cf371b0c1299e3a88de60e0626ecec650b9e2600c0ab5cb370d50cf61b69dc0","sha512":"bcaefa230285642b515754f28b44e505a99a2b3a30a7b3fdbb902379ff858d3bcd4e44f115a8231578fb4ebf89217173bf0e167b28c928a7f78b8bd0e12162cd"},"length":37838489,"released":"2020-04-29T01:03:52+08:00","url":"/pd-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"pd-server","hashes":{"sha256":"5166f68fcb2e0c5c45b6cf533ddfaa71191131f4e87934dc9d72433fedd4d3a9","sha512":"fbe1436e2ef1ce2bc104916db7627192e346b7a63da56555ddd56d1494e65e397d166ccc947123bad205bfbaf87218fde79f3a0ebe098f7e21bf5b16c45b1939"},"length":38086415,"released":"2020-05-15T21:55:08+08:00","url":"/pd-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.playground.json000066400000000000000000000172131505422223000246020ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"mvdXDLZuRq6kWiInl9+vvmSUmYdJJ9xE+CcUW0F4d5k5PDGjXzM1PUP1kOOIFmNeiYk0kI2Yto4/wGeZh4LeJNPdFERCtWeQl5CpubNUm8MeaV+yNXFtrM2BZhKhbvSRftJdtGdpB1MKSN7U3eMfiozQmu2FNoF4bv21YRCl2Morcu3t+ejxq97mYnuhlvLP8hBVEYjzwHUagVAeQ0NlV63PGPRtd/b0eoXX5dUz9JFpd+FjOkyAaZU8nQwdjH2jIL7FdfU3yvqg5j6loVslP8BWeCwknsCk5oAIxh2miKLlGUn3Kur3Dj8dOG+xTKkCt63UOme3YAzw+Cnck7Kejw=="}],"signed":{"_type":"component","description":"Bootstrap a local TiDB 
cluster","expires":"2031-10-21T08:19:55+08:00","id":"playground","name":"playground","platforms":{"darwin/amd64":{"v0.0.1":{"dependencies":null,"entry":"playground","hashes":{"sha256":"56b00249788fd57b3f72d255c28645e30a785273dd2727d505af0dc0c63186be","sha512":"5a204bd6241eb2c90e928cff30bebb7c9b8f86e02ec6857aa03aff8ddf793fcf76b15b748ff0b5997e76a6f400900d705200ce1b88574afc52d9a940c0b44c79"},"length":4512618,"released":"2020-02-27T10:10:10+08:00","url":"/playground-v0.0.1-darwin-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"playground","hashes":{"sha256":"8566257a5cb47d4d4afeffbef1ca6195ce61ad41af4520ca9570a868d5edce5d","sha512":"e6d9ad7d8baf7af179f22d869eb1cf3794de9ce2386b80ca06d9455e0cdbd46712eab0f3cef80addf8ccf9f76440a75f8814af206d9bd2d584e4775cf4de5a61"},"length":5772656,"released":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.2-darwin-amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"playground","hashes":{"sha256":"a497333886a7dc87d0372ed5991c36d75b6ddf16117f660c4fd2ac113b349521","sha512":"0ea80dbdcdf6aea567bea3f1f14a901cb185fc7e1ab06c50428129f72c64e06b3d038478996f43b76a5f3448044dc8aaa35ea01e0215242145d4a9402b0e862f"},"length":5773284,"released":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.3-darwin-amd64.tar.gz","yanked":false},"v0.0.4":{"dependencies":null,"entry":"playground","hashes":{"sha256":"47bb7d1e205b5ffb20b8b627eb105d8e2181cd1ecc3849d1421765775ab5d2ec","sha512":"8d9e1370072f509dbd7b9cacf85b4eb2c6347338ed3144778f6558ea41eb71228b9d8a3bb4df273e9f06989e148c9fbb4fcd593bc8b78aed5fb7d0c9506f2ff6"},"length":5781893,"released":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.4-darwin-amd64.tar.gz","yanked":false},"v0.0.5":{"dependencies":null,"entry":"playground","hashes":{"sha256":"2171d830baf003b6cae256d2e70b5834809fb4faf83fb1ff54cf9436d64afb26","sha512":"0b919d68df4dd68dd636eb7f3ae75c9db9dfff3d29865d99b8196c25ace3379e7c89ced7219f53eaf1da9af00edfc34a97378e68c972d189fac8d5b9c525bd27"},"length":5794626,"re
leased":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.5-darwin-amd64.tar.gz","yanked":false},"v0.0.6":{"dependencies":null,"entry":"playground","hashes":{"sha256":"d35d1d6f865b1287a212e17574ba1f7116fe9f519a51891faedc298f1ddf0ad6","sha512":"9f8a8f44951c56437d879396a0b2d93e9410707792f0575d85a2643876cd8349ff77fcdc518b640a0984dec029c2112e8c897423939dfb03bef472119f65d8b1"},"length":8768112,"released":"2020-04-03T16:21:57+08:00","url":"/playground-v0.0.6-darwin-amd64.tar.gz","yanked":false},"v0.0.7":{"dependencies":null,"entry":"playground","hashes":{"sha256":"e718a9bd2e1fefad48daeb0ee56702db24074b36c7bdbf9cf4cfd416c4d85a2d","sha512":"ee53bdfb8a6d01e5904b9f8b1feb49ae3b46b02a13918a1013559b6c00a0ae1f9cdf6c965f3f7015b924f1ea220abc39cb11ec63260a2db7d8a47bc0e9b1498c"},"length":8772581,"released":"2020-04-16T10:37:27+08:00","url":"/playground-v0.0.7-darwin-amd64.tar.gz","yanked":false},"v0.0.8":{"dependencies":null,"entry":"playground","hashes":{"sha256":"894de97b6c70cb04b046035c5821c3a6d61321f1d6ae64829096dd87a4c6183c","sha512":"e5253c3e18215e14623111dc908011efe82144f8e07a792cc955bd0aa78fa2ec2b2dcc57ae3c634b820dc758a47495d8d736e1e5b9dec00bdeef8356314982bd"},"length":8801141,"released":"2020-04-17T15:58:08+08:00","url":"/playground-v0.0.8-darwin-amd64.tar.gz","yanked":false},"v0.0.9":{"dependencies":null,"entry":"playground","hashes":{"sha256":"6abe45b9fb2bc7234d01be804e1ad8708aac208fb7a36dfde4b54e2fa63d0963","sha512":"a84795475524c0286a8102474b1a2938b8624b941660232e12d273d8d968e65eec40993ce9bfe1e5be362652f8e50d8e05502e9c48da2f8ca4302fe28bfb4851"},"length":8800246,"released":"2020-04-17T17:45:49+08:00","url":"/playground-v0.0.9-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.0.1":{"dependencies":null,"entry":"playground","hashes":{"sha256":"d7531f7474a33ab3e83c1d522348357e7fb846bc2a604bd03d7a187cdb3c7267","sha512":"58c218afaa41a358ef9db9822788c2ee525178483a48181356f8ac3ba74209db1cde29fb32021bc22cc801e3d64e839f635e4d678c9fe390ac760333c8be321c"},"length":4539820,"r
eleased":"2020-02-27T10:10:10+08:00","url":"/playground-v0.0.1-linux-amd64.tar.gz","yanked":false},"v0.0.2":{"dependencies":null,"entry":"playground","hashes":{"sha256":"beb164fc7d89c7c8e0fed897ea3e9121ad4d22056efa873453edbe96d93083cc","sha512":"a8fd34f9739f5bc84a35df1e3d68f35376c89aafe1280ada5bcc7f1575551335267dc4ad68b62d35c6be38ab2e9108420d4335ad803d73049e92a0f3baa172f4"},"length":5858022,"released":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.2-linux-amd64.tar.gz","yanked":false},"v0.0.3":{"dependencies":null,"entry":"playground","hashes":{"sha256":"d14aab6699c1ebc12089f8aecd2dd7efdf350b10a15d6a762c873a287345ec5e","sha512":"9c832b0e0746d4e50b02a058fee048c0adbb58f212ececfff9bf1dec26a0cfe8e17f87d3b8d607ef159eddff560271787fa8ca206cecea22d330ba5e70a89f09"},"length":5857790,"released":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.3-linux-amd64.tar.gz","yanked":false},"v0.0.4":{"dependencies":null,"entry":"playground","hashes":{"sha256":"413bb83151e5b59b6466f783ab8a7d347ebc841cc4b8eb8843464602738ffbc2","sha512":"9c5263822cb3c4e4e05c1c665eef7d91878139c4bb6dc118b65000a08728b2413e96eec277b996ef6840f47cf2edb7a7e4cad322860dca80e265ed76d01a6d6a"},"length":5869758,"released":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.4-linux-amd64.tar.gz","yanked":false},"v0.0.5":{"dependencies":null,"entry":"playground","hashes":{"sha256":"d940ac6df42755807a126fc0a552a307a55340793fe545c18370def268477b2d","sha512":"f72b92e458c6c8ffc99b892082690bd09c8519820a0332486cfbc63c6b05badf10ba98fd2ef1f75275b60074d1344cf82d5bebd57ab190c0ba6c4dcf031d206c"},"length":5884863,"released":"2020-03-03T19:39:35+08:00","url":"/playground-v0.0.5-linux-amd64.tar.gz","yanked":false},"v0.0.6":{"dependencies":null,"entry":"playground","hashes":{"sha256":"0e0cdea71083e0356da0d0e059f2d8d696a118895c0d023bdee392f468032bb8","sha512":"1be6c43b40a20d9a73b8e31f010ab948f7fd366739b1485588aee229d934684f42c87ec028d2848c151e49b1209c8eacb16addd27d72dd59c06e76c523074b82"},"length":8937370,"released":"2020-04-03
T16:21:57+08:00","url":"/playground-v0.0.6-linux-amd64.tar.gz","yanked":false},"v0.0.7":{"dependencies":null,"entry":"playground","hashes":{"sha256":"be32799fbfe23cc1e26db4142e439177365074a6ff3f61a06e5cfdad8f3f39af","sha512":"9502db41510b00cc758741d86821b714c027ff8048c92b7e78d783d913d9261b48a6ecfaf9af705df2ae7b55e599cd00197b879c9a45aedafa00f96a43e0c348"},"length":8945379,"released":"2020-04-16T10:37:27+08:00","url":"/playground-v0.0.7-linux-amd64.tar.gz","yanked":false},"v0.0.8":{"dependencies":null,"entry":"playground","hashes":{"sha256":"fbc9a8bfb9a1bca8804a228e724e718d5efaaa07061d6ceaeb9183201b24eae8","sha512":"b3e37080498a9d308fbd9afcf18f6b1977b9a011c3655482c057ab6192a15f52a70f65ef6c9e10726c52e1f2b9e408ad2b610a0ac0eeb7e4b1f107de6c9f7983"},"length":8982353,"released":"2020-04-17T15:58:08+08:00","url":"/playground-v0.0.8-linux-amd64.tar.gz","yanked":false},"v0.0.9":{"dependencies":null,"entry":"playground","hashes":{"sha256":"c9b9e6f3140db6902110a2cde171c86ea0367dc1a65c2f03ea9e38c011370773","sha512":"70d5e1dec81687d50cd5327c113109a3bf2ffb7bc7a796b885d160c35d65877958830041084b8e65a08fadc4bd52a478fc8d43b415fcd986e0d16a85fb9867df"},"length":8982763,"released":"2020-04-17T17:45:49+08:00","url":"/playground-v0.0.9-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.prometheus.json000066400000000000000000000633011505422223000246100ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"JnMtIccBu9N4ezAO+c0zMXnVLW6NTew8F5BW/9vb+w/UkWgJLRUu/6hUdc0wV5reasws0AUQlDlBru/3N+qWxWQoPcDpQu2d4Z3k/thDr0p8bqrQPwhg8JUZHi3WmvdlWocRsxKaLzYWSEBeD0Svt0zeeylJ+lDRyw+QfBgW41DbsfQ1tJfOzVLaK1hGYURt/xoXILO2dAijIVUVxjXvPcxORiEIzBKUjozslowgeITg2MiojIkZmZ8C5/jdhC7H+k79exfMlpRG2S/GzCtljg17xdLDVfFOPn4Rsk+0us9qjJsf9W3RSyvJSKPEkmPsTnzBJiCdNr6f+arv6UK77Q=="}],"signed":{"_type":"component","description":"The Prometheus monitoring system and time series 
database.","expires":"2031-10-21T08:19:55+08:00","id":"prometheus","name":"prometheus","platforms":{"darwin/amd64":{"v2.16.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"f27ecb71f43b082661f152e605f62814e670fffbc0e25b75bdc687ea4c8e9f03","sha512":"d56379ed707f379eb2057a1995df920339bfc825e13cf1c1ecff122b5d704d659db851cec37be56e4d967a14936140d84d43b94d0e73ecaac6b34a5bd8912369"},"length":59257676,"released":"2020-03-31T11:45:59.883169246+08:00","url":"/prometheus-v2.16.0-darwin-amd64.tar.gz","yanked":false},"v2.8.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"325e3cf724935e2751a7a3494e6a5a4ede7196b65524a8ddfe6650dae80026e9","sha512":"80f7ad404ec95b171c71751770b2833b9bc42d1465cc06b2cfb0712935b4799a242c64af4dd7303a79e8ea5fb8051645e18c04a474358903648e5d9297c95c81"},"length":41707089,"released":"2020-04-03T16:32:31+08:00","url":"/prometheus-v2.8.1-darwin-amd64.tar.gz","yanked":false},"v3.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"9aac21b3fb71209804e713f96871f8312bdc94ca5086b0bd3c36696b77afe5d3","sha512":"f69f044faced2fc89549ce8feb739d4c5365b26c154cc962ba19c2c1c0f8e33fdc1ae867e3a4cd88a5853dad32556e127bfc8496828d5237636e8c05d892708e"},"length":41704082,"released":"2020-04-16T17:06:14+08:00","url":"/prometheus-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"d981a2af85faa987dbfd7171d60c9b460042174049716b31cbd05c01f964a6a1","sha512":"2a447a993b73c87d36612eeaa24b92a43a4fe06ae93b15517f0ecdaf38b9e875855ab902e2b68130191eb3c1ff04d0bdef1e12cf7726a6dec8d617b7067e378e"},"length":41708175,"released":"2020-04-16T14:08:41+08:00","url":"/prometheus-v3.0.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"7b7355eb7093a030001ebdb628008fc4aa70b80a0eb2a41e793832fadf319ccd","sha512":"0672d847e07d3863565157ffb1e6cc8b67d4449ecb479dc31c96f337e1010ecd61707f55b5431f3abf
1c3f499f08071bd9ca2158fa5ea7cc3bd7b8fe5ad4c4da"},"length":41709481,"released":"2020-04-20T14:35:57+08:00","url":"/prometheus-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"a51b13881f7b7a29e3f7d1e4d8f5476632c9ae47d39c413adf2f0a7a00400104","sha512":"b89c844999bc0e5a0bfb837b0df0bc4210dfc398acebc0fb66b4b9bd73faa32838b506b2994672bd341db194af5a07190e3988a386457be869cd48d683100639"},"length":41704082,"released":"2020-04-13T16:14:12+08:00","url":"/prometheus-v3.0.10-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"eac01a4c2ab74ac35274a3d663cc364868d0e6e195b61717a37ebae1bfa7ee58","sha512":"c3ed7bd63ae7f8b6cb41ca345fa73b438d9b45ed1a2452a3e9a004c175b81f47b986b232ad8652718688b98700d72684853acf6e336c941a65dd6f8cee4af94d"},"length":41708035,"released":"2020-04-17T01:14:04+08:00","url":"/prometheus-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"09953183fa69e4880d3f088d217aafff66d885816a552b22a4047fcedbd153e7","sha512":"2b608cbe77ef10a33d47d6965b856f0245bf6f7242ea2004e51227cc2888684f037eec032c4b89b557cb1b774ede573e82f86d8b901ec1191135a0f679a23837"},"length":41707983,"released":"2020-04-17T01:20:17+08:00","url":"/prometheus-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"03842b73d3c7f5eadf1f23e1b1d9223c6ec88ca50effc4427076e6a7d492b1ae","sha512":"57d25d2e27763708c93b6a7e6422592237bcf330a84095db8e43466b6bd26ac180858da16dd3b2be39804b7f7dceddfcbb1908284875594284d50a29f4c187cc"},"length":41708260,"released":"2020-04-26T17:39:19+08:00","url":"/prometheus-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"b8503f190eb525e922da95917752b236e363a40de9a2e2c5d4a79274eda33add","sha512":"28f16fb8e62cbba6e7f459187aa8
9ce8a83e97bad6eabfb77f01fd73d244fe2e8deaf8426064e663a00c5044a8e527b6fa40d2251f1227b310d965d21e931ee6"},"length":41707873,"released":"2020-05-09T21:15:42+08:00","url":"/prometheus-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"c71da3d83169812a0e0ff73c25b96e464cbf21a2058b0b2d3de7717958eb3c6d","sha512":"779890936f06b9cd9b7e47d6eba67b33661edd7806806c16a3ad88a37dc5fecef55e76ff5fa07a77b023e5f2bd45b3ba7edf3835e77d3f3063c4363f06f76c08"},"length":41709715,"released":"2020-04-16T23:59:34+08:00","url":"/prometheus-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"614e2b36ce3b1e3ed7d52131a9b6bc3be1c7d9b840a0e42e8a062e3487c0160c","sha512":"abc993775e464e1513320f858ca6530bcce8a659382fa26f7016b76d83f92a5e26dde31d5e0c5d7d6e6b1f053a70b56afc9cc7aaec1436aa99bfca83bdceae9c"},"length":41709668,"released":"2020-04-17T00:20:51+08:00","url":"/prometheus-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"c940ba69854541fc1ffabbb1da7c55216ec6a14e756e9a10a68908b813299106","sha512":"54fceedbc61df4090db364c8ef8ffd4e5bca8dde5ba3b31f073aa8dc27ba25737c11666d14d9b98a9b1ec2b62d1d483947117a2b588a3c539813964d6298aade"},"length":41709750,"released":"2020-04-17T00:27:32+08:00","url":"/prometheus-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"0d32a7045b8b733754636eae45691ba70263334cb211bfa84983e7b4654db63e","sha512":"b421ee0fda9343f24b5fc88504a77246b2327c43f02543cb4e9a393ff95ffaf8b77ccd71815dba06d9c02307d809c73c4d8fe30a8bdfff3d0107213a6e523821"},"length":41709715,"released":"2020-04-17T00:37:30+08:00","url":"/prometheus-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"cb8e44fe6200b097ca0dde57aca73528017295383193d9a98a9f96a8c8
871feb","sha512":"2127707e83958fd356e7705168f5475afd21e9bc7ef35dba4b3e4d5409ead393043096974998c67f29349e760cd3ea687878545208f85da14910f3ff5b0605fd"},"length":41709772,"released":"2020-04-17T00:44:20+08:00","url":"/prometheus-v3.0.6-darwin-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"81844382b2010b1f369dc2956566e9f0aa53bd80b080f3b1c811e8c771b3e0c1","sha512":"50fcf39df2b3e364b13745926cd94fd50e8c1e44bce0db40d6d936308da783f138a790944cad18dbad331d136c8e9c1981f758b2e0895a830997919b334eb6be"},"length":41709882,"released":"2020-04-17T00:52:06+08:00","url":"/prometheus-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"2a9482d630e664676ea929b986aebb64f9851158d8feba57ff7dab6b2e644d27","sha512":"14bff31ee7ddd4ddd500e6d64698c2dc56ed8731ea9d900f550194c58a8e93613cb684c103db80ca03931f6a288ea3747520099c16911c607a96ec5e62939ec7"},"length":41708191,"released":"2020-04-17T00:58:49+08:00","url":"/prometheus-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"6a5c3d793328bf6173fb5d19db8db8130bbd795eaa7b52cd5ef4c62e0896d48b","sha512":"695b98153aaed7239814655f6372d7731a2995f14dad2a727ffbbb9a83197cfc941cf6ba54b3b3a047a418c4e357d38b9f523fcdcdec2d59e369b5ce2f574e7a"},"length":41708240,"released":"2020-04-17T01:07:06+08:00","url":"/prometheus-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"e0841801116ce0e4ae2bc159c1e93ba34db7c3a0a1f9e3d1797841b7cddd3b0b","sha512":"ddf007bbe4317a52e5347067e42a8f1799eb6a7303e13ef368c6280917ab82a76ae8a2ff949c08caa8cf1a926f44a40e0d6b9bb2ab87dce6d5e1dadb37442b25"},"length":41708111,"released":"2020-04-17T11:18:49+08:00","url":"/prometheus-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"90ef5f
1d4e29392aaa2076df0cacbb602bd3bd26e3a184aee58ab7d45bf6037c","sha512":"2502ae89e12a51f546d188cc35317e647ef9d06f3c291230d6cceadaa21e6a9542b7d35add4f2a48e9837f64eb29ee5aaf5044c38a1c52a7c369a7f3fa146955"},"length":41708025,"released":"2020-04-13T15:50:27+08:00","url":"/prometheus-v3.1.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"42a2140735abc639fa3f2456bbe2eadd0bf2082b35ff9b2698a17beda4f84cdf","sha512":"63a7909c5d0cfd696f941bbfd45d5173a01910bdcd87fc6e7b59d3aa120c07848f4c46ff0d5907284b0f7898949cefd109e6d071e572db34e2cf09fcad41f407"},"length":41708493,"released":"2020-04-07T20:03:57+08:00","url":"/prometheus-v3.1.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"75b042439c88556a3e46099fafc7ea56aeccae426fd1214b5228fd18d6c85dbc","sha512":"871f92ce20d9a141306c56b2a06deb2b5cba9536c271814114bec768fdf64e3184cbf451457cb1f8987809df3cb7c6aa9920d125748b46445f4c476f09a7343d"},"length":41708155,"released":"2020-04-07T20:08:43+08:00","url":"/prometheus-v3.1.0-rc-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"29dac7473daf33351eb42c0ecc232f1c376b705884ea1c9911f6deb22866face","sha512":"5b18cf76f778e55b5a5d5a9287c68fa7d68f64fd4f34c003d83b74c0dc07c3f308fe55d7c8e1d3f87f0b112957ab224bb8e2bb8a2cfa830bf9487a909113c7e6"},"length":41708596,"released":"2020-05-13T11:31:45+08:00","url":"/prometheus-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"4ad9890d81292bd8c324f8e1e8f55a9853b51d47320568e7e4b9aef0ab844868","sha512":"46d671f8327f9657fc4c41d640b603492b899bae756a3302c728630c6e946838504d6fecc7799647c4eaebb0e09d4d2b49562079c6993b3aa7530595a134afd7"},"length":41708018,"released":"2020-04-07T20:13:06+08:00","url":"/prometheus-v4.0.0-beta-darwin-amd64.tar.gz","yanked":false},"v4.0.
0-beta.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"262cc41cf80530b186feaed4735681d7989fe2dea33aa188f7dadfb9fc87c77d","sha512":"9c42de283c5df3618d56b957b667fa33421ced3c816ad20aa4665af4eefc9b881e2b4349ee25b8438bd5d2acf0003498d543368a6224779341481473c1ac627f"},"length":41708817,"released":"2020-04-07T20:17:38+08:00","url":"/prometheus-v4.0.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"016f501a6d82b23e188b1248b346c7e65ce3e20c41c2eae0cf0522b4d4c02051","sha512":"61f2d6cf9c9f0e31bff2f3fbf54ed37a08f2c9068ebe4086bda5a59235786f24a2f61204e17831d555b70e28a8b88f48e50d54b47ad0be3327ef446d79a1cdf9"},"length":41708752,"released":"2020-04-07T20:21:30+08:00","url":"/prometheus-v4.0.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"6e9c19d0fac43e3c99d99f73d429dcf2be929d07f9bfd77efe5b5ce011e5b91d","sha512":"1ccf04db42248c86de1b5e29f6c5dabec203c3141e09a352a8152f0e307e1ec4ec9db2d5b70fe85cac62d6af54001928372b88be35322e1601f9d8b6d4d8ac69"},"length":41708198,"released":"2020-04-17T01:43:30+08:00","url":"/prometheus-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"75571dc879d908395ba8d83c4e3f4cc231e3958bd7227df26d94802e7efddb03","sha512":"7b3d051496e0f6d3a82015d51a24803d8daa52873fc6ee3d4dc9797f302c0d9e99b16628c946bca8aa4b48960f4f61b850ac5414c46fa17222022e7275b49674"},"length":41709776,"released":"2020-04-29T00:35:15+08:00","url":"/prometheus-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"c91f75d2792bb72e59bea433f3aa22dc941d3706c890fce3aee07a0b7669a30f","sha512":"b87f15eaaad2325231e78b5351ae01fd55c134764b9295dbe5278b88be52500bd273cdc3595d38785788d62c6fc3166b143f6f6d28297bfc0e627f48ffc2f91f"},"length":41713230,"released":"20
20-05-15T23:36:16+08:00","url":"/prometheus-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v2.16.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"fa5021c5e3bcbbf0079a9f5ccf61a2e673e17585e7a0df19f82ca542ce8106da","sha512":"4731ce70fd6faf4a452f41e9c12d061a7b95939d4e8baffcc70a7b64d711dc220abaca87b579bc2bd2146a4242fcc5301033c00b5398e382e4bf559a5edaf258"},"length":59555665,"released":"2020-03-31T11:45:59.883169246+08:00","url":"/prometheus-v2.16.0-linux-amd64.tar.gz","yanked":false},"v2.8.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"cb1bb9ac40f80b3ceb5e0e4ae6a01054cec55c8800b34c8383da12caa71f88e7","sha512":"9d330317982383f8b09f923d621ddf8735743eafdb11f3eba7deb970427ea809981ca0790109b20c452dc649d45c943d068587bba5ff0d9a5d23c8121249d06b"},"length":41775711,"released":"2020-04-03T16:32:31+08:00","url":"/prometheus-v2.8.1-linux-amd64.tar.gz","yanked":false},"v3.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"67f69deb70d25a44588eaeef77822cc1ef9b76f8f628e03d41449420ffbdcebc","sha512":"2e94abe9ac8f5e7e000d8d0550cb7ee53d3188fc590fcd07f42d85bd4b59f17cb9f6b5d13d4b0168da3dd94fbe876a66d315ef0b2c857c680b296119a53e567a"},"length":41766469,"released":"2020-04-16T17:06:14+08:00","url":"/prometheus-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"2c962ed4f10096cd8d8a624e79cf7eff4fdc0f6e1c7e0e9bb7f8d163705920a4","sha512":"49c4638a71a6d5e3fbad3583d427c5045181f127799afa1fc064525d85416d1bcd89d1b62729a497b6dd06813a85dbfd865bea4b102a0ba7fcb5803357679d73"},"length":41776984,"released":"2020-04-16T14:08:41+08:00","url":"/prometheus-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"0672b3b943e33e477ea95f50297ba13c616b98b40bc826cd97a68abb55d859f1","sha512":"6c78c0320b8b68ab530b70abea7a1aa5f2b5c283aa0d5a9c3a185a649c31cd37acef0475c3e2097d4035aae446f38
4905efd71981fc85d42509aa0deb4f627b1"},"length":41777344,"released":"2020-04-20T14:35:57+08:00","url":"/prometheus-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"ea334b464e211f65a89e99ab0b72d9449ea5132a632bc438aab348a942837408","sha512":"87918efbc1b75e6302b5407cf0b0b07912faaad9264e26bcd4988a93780f3e5ad815efbf5f6e77e5a3235717705b7253dbbd4c93394bcd1aafaac5d673b82466"},"length":41766469,"released":"2020-04-13T16:14:12+08:00","url":"/prometheus-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"0179e49ee1c304de02dbb0eee32bbf0a8dedde4a58854493c3f689ed6faaf73b","sha512":"c2120e39aee6a24cd24772a22cff11566e7cce3f3a0a8d4301d2880cc5be8e81bba6b663f2180375cc41fa08f41bca1254a103b7e17c9d1c728d9425cd952408"},"length":41777104,"released":"2020-04-17T01:14:04+08:00","url":"/prometheus-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"607c902bf1a86faebb761cb3c75a777939de700199ce4543f3a6ce4080f39d87","sha512":"b56db130cc88a181f45a052952340580bc2d02c4a4f90228b99bbd5d3d77f1506c6475673e35527e82f9615e1c3a7e804a519486e9e14ab5d9295f1bd6f89cc0"},"length":41777101,"released":"2020-04-17T01:20:17+08:00","url":"/prometheus-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"65c9bb137cb52b3e72a7dbae6b03f900d139227a7a3243a668b20597d999e67d","sha512":"e0fe8075ecca096c1211ac22eed0828c267be3abf337a2108567424ebcabb2d0662461a812f11bac1637bfb1e1f266867b6491e302f040469321bae7efa66597"},"length":41777000,"released":"2020-04-26T17:39:19+08:00","url":"/prometheus-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"73c09d11179a09a42d9de09e74b760b6fc706a71cf71c65fa81179cc76f094db","sha512":"27baeb6f5a8eae52d4d6b90c73e8e1ded0ebf7f6749d
ebc48c0c4c675860b798798180c84fbef62aa65e1fbcbce8082c335488b6ca95ccdb304d692805aec3fe"},"length":41775745,"released":"2020-05-09T21:15:42+08:00","url":"/prometheus-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"84cca0ae33d900b16c359a5d52c9222a3fb1212792348283980005906be9efa7","sha512":"e0addd4d09a7be8a8c12e84dfc81e81e40f41cfa4d3e55ff5715473b9861207e9ba822b3595e50e1a0b8013084825c1e31054d94970acb37b694134d5fc7655d"},"length":41777010,"released":"2020-04-16T23:59:34+08:00","url":"/prometheus-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"4f5cab9fca01bccd24133236b5ced3f8fb2460216f0bb6be10fcb9552ee3e6aa","sha512":"a61bdf031a163a5ad283e60be0966fe4939b64b941f0fdf644a8fd2b6e435af5ae14e7f31d15882b02ea0bba26fd9ff92a6d756b4cc8daef1e44d56e6a1808b0"},"length":41777667,"released":"2020-04-17T00:20:51+08:00","url":"/prometheus-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"ec5fe641445b119752b059c31dcae3f0cf6defc359bad2882b19b6536faaf516","sha512":"b39e33e5fc5f2eb4f9262f4f3c00b1ed1cb7b02a338f3dbc4b4ec99035f81896af0fcf04c0f391b7852af13857c5a5a2650c0ea080d996f72ad2e551e9222238"},"length":41777577,"released":"2020-04-17T00:27:32+08:00","url":"/prometheus-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"98f92f9017159b08aa5c391f43733c37318bffd04ecb7bfd39620f34147c25c2","sha512":"14c5a9fa6d79beb400b29314000798ee507fc3161948aca978e3775ae461e614db8770dc0a7c1f06333d3717e5d3269d2e39b9254ab9a05f8c02610a620a09a1"},"length":41776982,"released":"2020-04-17T00:37:30+08:00","url":"/prometheus-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"2518c0812b955782bac1cb7faaf135f8c6f131bafcb1f46dd3a5db9469bdc4c6","sha512":"f66
a0774e35156c21611ec2beaf579a0897cfafee896762d9c50b3791c9c73d6a3f7985e168cc55d5358496a0bda8132c16e8410bbb63ace67bc5fac821b7afd"},"length":41777091,"released":"2020-04-17T00:44:20+08:00","url":"/prometheus-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"937011488485dba250280383844bdd0c59676a2f2f2368b9d3b24d83d003973b","sha512":"bf52e60c0382cffca5c94b4054f6366362236c4c111cd8ae5082374dbe6afce41819e157247bb0868771bb33f3f1c5316792ab5b0cdba29f7e955d7b7a069409"},"length":41777513,"released":"2020-04-17T00:52:06+08:00","url":"/prometheus-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"b2da47372a4dca03205bb1f47946049bd1953ac2f663be4f370f3ec626422db7","sha512":"ae927908ceecb556cae73d2a41817f44925a386f779e59997113c19312eaf35cd3b478ab5d5579058471e262d606ac6b4d66f8f9976570f5fd1657b3f66c0704"},"length":41777054,"released":"2020-04-17T00:58:49+08:00","url":"/prometheus-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"cd38324cd172ed8f81945358bce21f6b0206719b5b8250fdbfe1b0e16b655f13","sha512":"6bfe16008094a0981d3d298a4d6976bf865492dc5e987f72e55acd5bf9ab11ee9eb06d86742fcd8f451d8d0d1456a9b175577713ea89cb15dd3b2c2879c88fb2"},"length":41776967,"released":"2020-04-17T01:07:06+08:00","url":"/prometheus-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"85925829104fb0a893077caa359eaa231e701a557f60cf03d96d8b9c5410de56","sha512":"1387bbeef6f744096f89e70111075c9d39f684f82e8479d1688e6f41111f8f42e1f38cb29578996b26deaacc2338dc44c89229c1d8c5dbc270852f22d1fc44b3"},"length":41777295,"released":"2020-04-17T11:18:49+08:00","url":"/prometheus-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"76140469acc93e20b84344c2cdf2cd87
d666f1ab3c57c8358709f98ba2f33db3","sha512":"bd34b4169bf3cde0b65afd2b302fd87a59483afb3ef0daccfd19e88dc1c40c1d4ebbec4b6dc1e48097cf066fb44c02b669c800a766f432ceaba4f5357107212d"},"length":41777106,"released":"2020-04-13T15:50:27+08:00","url":"/prometheus-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"36c8a2aee27b03b9d26748388c47bc46901d379d339d03bdc9953401c0d4cbe4","sha512":"863cce69c35e98ca30bc018c27025e9dc30e11353556de257d51efb134f64eece4c19976dbc60486389ad2e2c03c2c6038bcaa0929540fb5fc9976170fdc1d8e"},"length":41777458,"released":"2020-04-07T20:03:57+08:00","url":"/prometheus-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"63938e07c33ecb175d2e94afa029c743c9105822e0d539be01e9d039c24f6d7c","sha512":"f5f13a0876b099bc66883c481f03658b5ebbd0b02b3fabef8d3c9fb7ad94727013f3f4a1416c819de0cf3da83c6bdcef570fdab1d7324f4c15a5b2b7697552d1"},"length":41777289,"released":"2020-04-07T20:08:43+08:00","url":"/prometheus-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"e02de1b3392a8bb52e003b351b4c074393e26eb2d87b852ea2527ccd3d816e76","sha512":"63230a349426196b6f8512dc67a516d926a70cdba004df9e8a9e53177b959ea61b9fd847ed0b0c51b1403817b3235fa1f9b5e1dc7a12d05c261a35e048acb380"},"length":41776100,"released":"2020-05-13T11:31:45+08:00","url":"/prometheus-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"35dc4c2d82b77e3a39be9e698c5a60a406fe7e5e935da2388e544bb5d2681b5a","sha512":"769c4efab24a7c61a2e77b0b526ac09241e4be4ad6d325cc6ebf307803b6ccc3e399046ac658db9a08f17e8530229578e1b926413d7cba10058676a32676d056"},"length":41777350,"released":"2020-04-07T20:13:06+08:00","url":"/prometheus-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,
"entry":"prometheus/prometheus","hashes":{"sha256":"0caa58150925d149e5b521c422eef6af02989a91ad00c0568c6e07cee4db470c","sha512":"2685b1bc9d14780f920ee2a9185badf499e510cae2160b7d0760453cf5231a50b8031ade1fc7d5ed65d5cc49aa28a5c2da2b3bf6eae4a9b9aeb46ab114ac4161"},"length":41777464,"released":"2020-04-07T20:17:38+08:00","url":"/prometheus-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"3e244c3f2742788e8734f61d83324f07541e4595a7ddf7debbc6e8ee1f4c17c6","sha512":"bc6b44c213443c5a1963d18c7b1f1a4771e7804fcccc4b55f4a1fa8dd2de1ef045b7abef2ea16315811d5c0ede301855e5227d4928f30fd9d9f291a35e8d3ef2"},"length":41777404,"released":"2020-04-07T20:21:30+08:00","url":"/prometheus-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"739a1707c9c5893d1e8d9ea058a1558a69c28f7cd180d838d2642568586a3a7e","sha512":"a2bdb7f8ba73dd98623d1e7f16cb76576c473c5d8ce1749a8becbf9977e80ab07307602ea1f06dc49ef725542b962f4535701efb93ffcd0df39a05ab2163351a"},"length":41777306,"released":"2020-04-17T01:43:30+08:00","url":"/prometheus-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"87173e3a4a6ca00f289586a99904d01dcb71c26ed8906461144549dfbe0a45af","sha512":"043eca5fe80ccd1774f376fc043483fa36c4eca59e1a1067af3688f4ef8c8c33050d7c3e07719d074e77f6a066a5f7c765faa6b6f7c31608a2bf48264fc9f2e8"},"length":41779530,"released":"2020-04-29T00:35:15+08:00","url":"/prometheus-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"f7fd31fdb05b8940e08e345188ff0b0b77977dcbc604fa3ef43b3e3f80b93756","sha512":"647a2067702516011c7a9e080e1933bdb504fd8b936b8df4358c3930d0e636e406be80611acae618ddc71dbbc013b06a6ef0fee4aa7daaf6ec437e567f3867cc"},"length":41779526,"released":"2020-05-15T23:36:16+08:00","url":"/pr
ometheus-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.14":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"060e1fc146b75266a98a457e56f9fabc87395342667612588ae906ac959905e2","sha512":"8894b6d9bb30d378f0984d06e14325deaae0a8527b84df9219e8445abb9f0c6c0d9fd86cbd5086c68fec5f75b10e1b92a00a5f6be863890f0f5462d8342ed8ac"},"length":38681638,"released":"2020-05-09T21:15:42+08:00","url":"/prometheus-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"755f0a64d54d475628b1766761ccd6572b97cdeae016b16e280f20fc4e2d76c2","sha512":"67acb822f239c233e8fcce3f9d4e0e4b677cc9230564af3f4f5ea6c88e83d8b1bf1f5cdd4d5972bf5320863b451387f82282f01a07f37a2205aff37959522695"},"length":38682967,"released":"2020-05-13T11:31:45+08:00","url":"/prometheus-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"850f2fa4a05a12e050015bad12409d07b33ce603dd3f4a70a5923091a57e049c","sha512":"99950ff4408e54fc634bae8fe759aa4ec210240aa9c21d156fd62e20a243d20db6f53523f2fac627e9f6369072425b12c78960a09db01d41c8d34fde414e0cd8"},"length":38687756,"released":"2020-04-29T00:35:15+08:00","url":"/prometheus-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"prometheus/prometheus","hashes":{"sha256":"8f30260ca6c9a06db9c6707b21d64b00a4e6fd11f5b386b6d1024b25028e151e","sha512":"635b0c3ac4c2f6cce85167b92b3539d6059ff46325cbe04b0355e92525aa8ba550ae39e4448e5cc9663f654cf59b83e7012e6db8afc993179a1781b710dc0a50"},"length":38690298,"released":"2020-05-15T23:36:16+08:00","url":"/prometheus-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.pump.json000066400000000000000000000473121505422223000234020ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"
o7HE4LOw9BKuWZGai6EtSXaPRUPM4ruZ2gPXjJzZb9NdHSXYpqQGPxIZlg/IbraQnCjGq026eGvEURwnONjOHlI7cBUd9SOzqMRLDTvvM7iqI0dOrxbhKtYkSIcaTVet/qkMxC6KvgkcmccQfbsvSvzrqgXkopKtpi0B6mxb0zb1686BfgT3mfqfCDZtYvFwKGkkroTrTZfGF4bNfPfb1swwu6el7HjGYjahzHaEjy7G/RxUeq9GF3aqR6KfLBTVHoQRUyGtKVfBuS7msjm324WrUw1CWMc4A150x7neYhRpo/TF6/1k4Tcbj6QVPtJDhVpoPQEZfE2k4KfBTEJ1vg=="}],"signed":{"_type":"component","description":"The pump componet of TiDB binlog service","expires":"2031-10-21T08:19:55+08:00","id":"pump","name":"pump","platforms":{"darwin/amd64":{"v3.0":{"dependencies":null,"entry":"pump","hashes":{"sha256":"9b2408db5517b4185fd3f2be56327dc430e6209a9c6ac33a8a2312871d4aabc9","sha512":"f89d5758e76ad0ad0fe12d3c29ec92a56af3c019a625954d9c1872d8c2f9cfa8022ad5d598540817b35d7d7f91e2ed0d3a66ad73544f65dfa5ffaddd38d57d04"},"length":18801470,"released":"2020-04-16T16:58:52+08:00","url":"/pump-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"dc55ff5624246d442761c5d57a2131be09503bac8abbe358644f516d4ed35388","sha512":"e5821a53d76bad81194b889376c1b571bd9a3966689729d0e92d195fff02d26343bea407999e2053b5fb86e33089786142491081744e8d0c78ca0534a428711e"},"length":14086165,"released":"2020-04-27T19:38:54+08:00","url":"/pump-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"pump","hashes":{"sha256":"87b1062dcd7bcec4de3269481d906d81adc8dbf4bf23b5c34bad669b0d0ba9d3","sha512":"73ad22e44647788ccea06b4c9b163fb01d7a315b0d4658e90283803233497ed68806a351d3c7982809dcf0682379cae9054e5e925e3a5fc90586a65ae0cb3aee"},"length":14397383,"released":"2020-04-17T01:09:54+08:00","url":"/pump-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"pump","hashes":{"sha256":"5b394b144830b469b527455b7cd455d2832a8180b738081253dc58043ef16c6e","sha512":"552a1c71efc106d6c730e281cfc24fc433cfe222be237762b770556b70ff4adec508e95543dcfcd411db3e254cbf135b4d065723cd8e0401100a37a7acb9a19b"},"length":14397384,"released":"2020-04-17T01
:16:38+08:00","url":"/pump-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"pump","hashes":{"sha256":"6a6073573f90fee88228dc9c47b9c1a271dc2dde3c5cc8c1544fc100cc40a239","sha512":"22687439fbe80bf9cf4f073dfe1c04a0e4b9aa138d705996d5b43eb73cda8aefa92d5371fb76f94aed4cbe9a306bbbfb738e58f42c928046c95ddcb0b4f88491"},"length":14398656,"released":"2020-05-09T21:12:05+08:00","url":"/pump-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"pump","hashes":{"sha256":"b49399805a31bee81a65f3108339c28d0bc7982ecd703d216cd26c5d2a96623d","sha512":"54fbe541bb9db1d02e42eef84499c24248e52120f86af4859521d8b360d757edbeb96277c1e470b475005f729b98e3d5ec2e50bf192f2e97b3b445aaadc1ff4f"},"length":14115494,"released":"2020-04-16T23:55:48+08:00","url":"/pump-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"pump","hashes":{"sha256":"80b052b549d8b36d49165c398a9b1532866253d696db10578ec3b9c618d47743","sha512":"9141801fa625661db114ae86c5c5e729f8ee2a27bf9aaae86569617c4143a5a59df3e14ce9ed3a37344472c3867f77900814d79de05b7c1d71e3569bbde39430"},"length":14117374,"released":"2020-04-17T00:17:04+08:00","url":"/pump-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"pump","hashes":{"sha256":"9a6c95b8547aeb5bd61c4e0822e870cedf7e8e52722ebb0f30c7f83518cbdb5e","sha512":"1d5ca29123557ea58dc3cbe30ecd98e26a08b3fbe0ff963456847df5881a61ab11bbecf8e44dc0f78484d86d17da04b646b31499f0479ab1aaeae00d804fc3f1"},"length":14137982,"released":"2020-04-17T00:23:18+08:00","url":"/pump-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"pump","hashes":{"sha256":"a48bb549342cf7c349351037ce4f23399f8d6079becc1af8d1126498d56668ab","sha512":"b0b4804b38ad98d5052a7a79727e73e63b192a7e96f44c5b2d33e95de4c68fffdd6d2149b87b7f230332430d688f4b94c20b5a4c31b01642353f0b840da01a4d"},"length":14177470,"released":"2020-04-17T00:30:31+08:00","url":"/pump-v3.0.5-darwin-amd64.tar.gz","yanked":fal
se},"v3.0.6":{"dependencies":null,"entry":"pump","hashes":{"sha256":"0edb3bb2d688d5715827fd93ab07aae15fa2e8838c3eaed6bbbcabe4e1d0a7b3","sha512":"e24b8daf4b59d419b6fbfa44c78f1be97e1d3a336e825e9c729abae632d52354cbca174bbf076d9773ba2c399d1fbb9a19e8d986319bb335d8b1418b42f41f64"},"length":14389107,"released":"2020-04-17T00:40:06+08:00","url":"/pump-v3.0.6-darwin-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"pump","hashes":{"sha256":"fa039ce0d0082bbe1a972a6584036e1af1885dd3a1005af8b95742351ffb7ae8","sha512":"37321f65c2e5693f43945d51a5cfb06209c4e4eb06204f9769ad2d7bb0e08b1e049b47a8fb1657f665da5d34512b456cf48fbeef9a43c8c83681d888789334d9"},"length":14389031,"released":"2020-04-17T00:47:05+08:00","url":"/pump-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"pump","hashes":{"sha256":"bf18a5a75c294ebea9a481b60b30e4790f4e513ed83cc6566c85e391e5b1e286","sha512":"437dc995e815bb0cd613eaa830b3a2baf0d62fbdb5d280ec9a2eae3ed9a88c365756f6123c2e06b222d25459117ce585a19c5b90747aba0b51e5c1ada8e412b2"},"length":14388204,"released":"2020-04-17T00:54:55+08:00","url":"/pump-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"pump","hashes":{"sha256":"578c0c4e868a3c00c3e3771c5ed00cb4cd711477fdcc13fb7301108992dca698","sha512":"bc6780d5daf78a13343a5b83b0584d0ee73fe589e1c40c2c282f4e72487ed84084da098858700a4d287d7fe6a67c26297749f4c3af39c2110351f8cc71f18e8a"},"length":14396324,"released":"2020-04-17T01:01:32+08:00","url":"/pump-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"pump","hashes":{"sha256":"87a6f9740571e1a9497e76b96b89fd5d452298a3612269fb68caa51e27aa7b12","sha512":"260d7d89bd789f4583287b26c7fbfd84e5422ee6e482121c4679f786ff3231789d574b3cb517cb1eaa9f299b04ddf255bc95e3e635903b9a7b3d9e48ceac5d14"},"length":18801409,"released":"2020-04-17T11:10:06+08:00","url":"/pump-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"pump","hashes":{"sha256"
:"d4448ada9a3abf7f430f6db479aaf14d41d456a36f113f26a815d7552b81abea","sha512":"bf270a547b9a2c335a0278f17f4f24d6707a974a6ddd2f71856edf70559c30a5ac57fe22a7a13719dfa39ac90e9b750d46961379a39f760fa52f350c44527401"},"length":15203295,"released":"2020-04-17T01:22:44+08:00","url":"/pump-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"1db23a377ec872561b78a79594513e3ff07c6af5d9246d9515a6721646e70ef0","sha512":"025951db8cf573d6f4f8f3425044a88a903370a19577b58b1d33c4bceea0c583d9ac557b3da4f0871c9e698f2e8c306d4fab8850cd3d452a0a26298f4c2c43c8"},"length":15372520,"released":"2020-04-29T01:04:00+08:00","url":"/pump-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"pump","hashes":{"sha256":"3140e3cde3713f8757eb8a976e2cae01364c299874faaeb3e54a3f71330b7832","sha512":"d5d624b3893a70d8159e7b7b8d40108efd8baafd1a3027ff43700eda21f4c79b4cb6f88cdda625c009983b2627a598ac36ea01c4f35cceffb72b3889734db87d"},"length":15372137,"released":"2020-05-15T21:55:15+08:00","url":"/pump-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0":{"dependencies":null,"entry":"pump","hashes":{"sha256":"4e83f1af46ba9dedf721467db1b241462d8bd82c3bd1565ff76a21af91cde159","sha512":"4e1f56a8897783ee1713240c11604a74f71accc3ac7db7590624c390d9affe9380a505a3e8f8f5c8c84979ac91b531686e31200352234eda0c2968d1bf36bf9a"},"length":14502977,"released":"2020-04-16T16:58:52+08:00","url":"/pump-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"pump","hashes":{"sha256":"616e95a932f7fcc09ce50c49e6e9f3f2c39627bfe6e854e98dfe23b9419c15de","sha512":"06cb4d0f41f78a0f93b3a66f355a6f63fc7f67e9c03b13993c6a5315856cb42c4bc8e17af14e3be3fd85b6963a223547a47b8b1b8f3b1823a902fd88564f836b"},"length":19476719,"released":"2020-04-16T14:03:21+08:00","url":"/pump-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"4358dd00c30597000aa6d9f3ab914744a11
8c9a64f095a965876c9893d955240","sha512":"d52d418eade19560d37ae1bb79b89142b131d8da77861c10db014e0bbfd7c211e701f89145e81be64a7a56b527952511f696489e4838e3e4e7ec41a2c41d21a3"},"length":14587380,"released":"2020-04-27T19:38:54+08:00","url":"/pump-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"pump","hashes":{"sha256":"b3674670dc76cded07dddb01dbb3412497b636f78fa57f81c2f6dc647ccd66cc","sha512":"d5dcb9e7544bcb747e06e25a3267365b835a27d61ce22289481d4713e37fe8e8725fbe7c047618085a23e347cdf33bad280a4728f9ee850a53b4f9d4dd96954f"},"length":14490852,"released":"2020-03-27T23:32:10.702063+08:00","url":"/pump-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"pump","hashes":{"sha256":"84c50d49e05d1ffc49eb200540ad590b05c62898bec2df4362c3391bfd00b5fd","sha512":"7e9d49bb73a6f4ebd7290c347976cd7a51f7f1cc35d43175895b000d6f19f9d75996b7a002c4c94447ee8ef3a819c17bf0c4de748c1eec152d88795fce398cca"},"length":14490837,"released":"2020-04-17T01:09:54+08:00","url":"/pump-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"pump","hashes":{"sha256":"2e721256bf1ce4ee77077ee139262b54662102c91b4ad30305cf76a6b054357c","sha512":"b53b56000d29f6b387c0827573ab3eee5989f229611615953717176d6428c370f98adefb92e8715949ba844e88047b777cd698be364014b36f56b9a0f1e02359"},"length":14490837,"released":"2020-04-17T01:16:38+08:00","url":"/pump-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"pump","hashes":{"sha256":"34290d42f4258e9a8673cba4e79b32d3c68ee60b46b9bc8045ba326eb6c730fd","sha512":"9ca2c094b965f0083233116cffe7ceb3664134face4d6ab39c7b6d072e71c8b558b4cbe00a3e01882f40dd82d295d442dafd2d25b62c9637d881c79c308eda8e"},"length":14517212,"released":"2020-04-26T17:23:15+08:00","url":"/pump-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"pump","hashes":{"sha256":"9730670afc8ab44315e6b74e34b5792b9547476cc66445252d04e6df8b080217","sha512":"e60b7c239c439cf04d
848d13112826961fbac9a7e223be933e7c3ae110af673cd90058dc94426aed345233acfb2b72253e14a1dd32da94c7d82afcb54b7e5fa5"},"length":14517222,"released":"2020-05-09T21:12:05+08:00","url":"/pump-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"pump","hashes":{"sha256":"0bc5dd2ddab4a1fc94b5d6191c60674e8cea3cd3e720fb870dcf02633947b80f","sha512":"1df718ae2813f5d7898ba4b4b89a56247635934c0d023f057c4af2e173a3f4dd9774955019261f2dd566e21a2375267f0170d103a1043aa0eb5a7d775e9130b5"},"length":14618618,"released":"2020-04-16T23:55:48+08:00","url":"/pump-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"pump","hashes":{"sha256":"ae531a0781d652a6706dc7a2d351f037604bf60ba3cc06440299bb7a5b1fb608","sha512":"942c57faf693ad47efeeddf13c95f209b3bd5c71f1487408014c4dc9bc51ae7ab13d755928e76b3147daa40017dff0ebbeb170ae0a9f58fe62345641da87d1a1"},"length":14606912,"released":"2020-04-17T00:17:04+08:00","url":"/pump-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"pump","hashes":{"sha256":"d414442b9186d6c4a3bb57f4a68883510189e7ad57b98370e82e64f4ecd867f4","sha512":"b13743a63629ee72fdd5c06550341321b7b1bfe47e25582f7608811f982ef66f0990aa8d1c088cb185ae54a4f88eb254aed8e3163aad4e28f7c66a8bc77d73b2"},"length":14632259,"released":"2020-04-17T00:23:18+08:00","url":"/pump-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"pump","hashes":{"sha256":"7d504843b6b4a36360c361dd2dbdedff6d408904706adaafd05114e9fcc6569e","sha512":"f89ba97378f9285d3494173d4aab0475881d01f6a018bf191374dcecb1654f9631fffe9f5bf724d89a6269d7f07271aaf4952d75440b946be9c5b0b829a21ecf"},"length":14676814,"released":"2020-04-17T00:30:31+08:00","url":"/pump-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"pump","hashes":{"sha256":"c192c3909766d4457890bf779687031debb30e55ccf29139fda31a543509b38a","sha512":"5032f3a72c8dd37007dfd9f118e4778c4d6e82f544a6e5730a640c40f2861cd9c0b99f9d11ccecdcfe6d34e21a2e
467df2d690e1e702d68b749e02d7c5255884"},"length":14485194,"released":"2020-04-17T00:40:06+08:00","url":"/pump-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"pump","hashes":{"sha256":"f83f322e27ceab65617410726f1285bb6480ab713e8fc08729d084c67102c0c8","sha512":"4a3732d8bb7a1b40826fa232ca853d6d39eee96ecabb1bc107e9f10f8b31d3c680199d0ee2de643b34a546aff76bbb1a45aded78b594dff463d98c9ff64fd8a6"},"length":14485208,"released":"2020-04-17T00:47:05+08:00","url":"/pump-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"pump","hashes":{"sha256":"09ce29febe890df425e644ec94f934512fd4309eb896343d01e22a752fd9ce1e","sha512":"5e64dcc0251aa096da4d488088fb8cadacec97dbe41585fb01a22816039fa49934c301bdba76ef2459af082a2ff0070b857a9cd745ffc5ce8ca15857b5e64703"},"length":14485405,"released":"2020-04-17T00:54:55+08:00","url":"/pump-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"pump","hashes":{"sha256":"7ecc5cd82c87009ac5826f1b67b0914e1ec4cbc59826c788b360b0f33b88b1f2","sha512":"3273452db77376dc18d8b32c9dd54ecfc6d6e722384677203272712f72e5ae320b2597e3cc2e4b2cb755a12db01e7c4ba2aff4f10aa2c1d29ca6a3f2411ca57b"},"length":14492610,"released":"2020-04-17T01:01:32+08:00","url":"/pump-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"pump","hashes":{"sha256":"4f2f387870ea2ce812e0e8f27e2a3de60400c27a72f392045368e63a4252a40c","sha512":"8ac788df2fe9b6abed163fea5e2199834d52d742925e190762a758cf1ef1e9d8a02d09eaf609710c6a7304909fd957c3c043cb59bf9fa44992a456ec1067ec1a"},"length":14897150,"released":"2020-04-17T11:10:06+08:00","url":"/pump-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"pump","hashes":{"sha256":"06622e7dd020ee2a9c535f033b4f379fec6794aea773b49a5f08b4099d0cdd18","sha512":"840f9f00dfdf54f618586715a2979e8907bdcca53e9bde01b34d7ea663efe01007e7d55500afaf931a1e807472d84760c6a0b96a9d7cdada586e325ef6c32f97"},"length":14502895,"released":"2
020-04-13T16:07:42+08:00","url":"/pump-v3.1.0-beta-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"6f0dc4457ae9893d136ef9964f5aa788382542b3e134a85775733717c73dcc98","sha512":"eeaea91fc547733d9b6198fdf66dc32f0aabd7fb48f721b4348ce4e1b31235f42e9a79382144b5445d1f9e755d949d5dcdecbb88cf5ad719cd979a064a6027cb"},"length":14500490,"released":"2020-04-13T15:45:26+08:00","url":"/pump-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"pump","hashes":{"sha256":"95a0f9f0d5700667a52283af10d33958b43548d7f6eb147ea3de3bbf40075b94","sha512":"1e659a63386646953307085d70f5b1a6f4ee9f0501351104858befd7990aab6841047e3953e73382ddfd6c424e2da5f59bc5d3fffbb522e7989980918502dcf0"},"length":14512321,"released":"2020-03-27T23:42:03.853088+08:00","url":"/pump-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"pump","hashes":{"sha256":"a48654151a8f13fa7aeb36af37199688746cd72191b01d73c1a7faff9de1d439","sha512":"ea0ad427a7c1b40359029c9730bdda978ff1422411945ecf10b61329413d4bc1075691925e2eb35cc485b4872d019f82863eae8576f80f745beafab979eb0bb4"},"length":14895389,"released":"2020-04-02T23:43:07.933119016+08:00","url":"/pump-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"53b0a35fe36636816c785f7d4d7a6062e2dd112216e189325c62bbb9facb255a","sha512":"e678bebae4c042a456e0a8c5d207e70e5deca780a484b541853eb64b1fec2c388f55810db901d8e7e33d25d68af2e077ca4bd427d7fc91f23acd733658f6c323"},"length":14897168,"released":"2020-04-30T21:01:52+08:00","url":"/pump-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"pump","hashes":{"sha256":"169d3afcf870885e8e14858d67558b432f286820a68838040f1f42549ad063c0","sha512":"dba1899c1a647d4a13fe605fd64cf58f80bb31bdadb4509a0d4f489844553c9bbaacec04ceb4f9cc1e067b24197c4c8285dfb05b6147f89cc99dd42f2edfcab0"},"length":17701728,"released":"2020-03-27T23:4
4:51.76646+08:00","url":"/pump-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"2ffbbf885951a0f17e7ad8ffa12f002101009a58f03e488ed1cea325524356d7","sha512":"a3a2d8d9db23d13e5ec10d8d29f9880e5a368620a9036f0cc6995889d9e0600be32b733bdbf05905d219d699384ac1f309da00d87a9b7047b7e09302ab8a9ae1"},"length":17697308,"released":"2020-03-27T23:48:21.308496+08:00","url":"/pump-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"pump","hashes":{"sha256":"744763d546c313a784ac20e4762cab2347323502eddf8259f8bc1497f947c81f","sha512":"859fc4ab9b11374ad050776cb85e349cd3012df4f196e6f90316b4d61c63acc9f542c4962a29b98ec06ee21452bfd93690b7518daa23425d5f936e5bd3c97a14"},"length":15240768,"released":"2020-03-27T23:52:08.65238+08:00","url":"/pump-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"pump","hashes":{"sha256":"ceac3f261d978e2855202127b3d3969acc2657108fa4a4daa3c90c6bda751298","sha512":"f759efcb191d0648cce1fb459cd3767aa864e8beed9354e1c0c2d30c7de4f7e8b2d0ec39b9585dd7703dc5f1e5cc7867b52b83892009de17c3dbc998edd0b9c0"},"length":15316933,"released":"2020-04-17T01:22:44+08:00","url":"/pump-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"85a830c52d3bcf47b1a4e75ebc494f7acd810d053e2bf6ab657745748dbdb4ab","sha512":"41e57b3e2083984042159cb1e731684abcccf35d6478c10ad0c53267f2b12b35edf2b93147684d8bdd2b2b3f1d2c64f9f698687a454ccf105b03f0d4c73b2923"},"length":15486075,"released":"2020-04-29T01:04:00+08:00","url":"/pump-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"pump","hashes":{"sha256":"e5954ad887248930af254ac4b0bccd267b4414f9c3a52c02e2492e3b49a84134","sha512":"6b15b7746326dec363e8b6fdc2a0da0ffa3b1ac3730af24bf73a170308f435db8cba17580aadf03c64d40bbda76eb2fbc2ca062a43c0cbf209e5ce3212384938"},"length":15485174,"released":"2020-05-15T21:55:
15+08:00","url":"/pump-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"be76f8d192617e1e44f830af44054c7e7d2d442f95468ad5ceadd234cf52d3bf","sha512":"a23aaa03986e31f9837e68a6e3606bd133422d50fd0abd2a3f21e4e45d1960b94d3fcdf927b1e084e4b0cec929cff21d1d2cfc3a8a8f8ffde9ccdc36b1a3a579"},"length":13268417,"released":"2020-04-27T19:38:54+08:00","url":"/pump-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"pump","hashes":{"sha256":"b1d8e1e047511cd2e2904cf3ef2ef9476b8e74f1cabedc391b97be7494e385b2","sha512":"74e3050d9f0da08c45d59004198eeb563155f5bcad494f3c42c3a41ae357a8bc75389b3824f00097f54974e66d97a441c499b6528dfe0d588d10b54be004b812"},"length":13572664,"released":"2020-05-09T21:12:05+08:00","url":"/pump-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"e684969b3b1154c3aabf0ea2cfb3560b709290abf9554e2941fdf099cb540609","sha512":"7bede743db2a22c8bd943e8e66e1580f114fc7c6a69d2bc65b961b801654692b8547bcac4eed2368ef72cebfeaa925ebfc921e6448e2f689c60de5b8371c5684"},"length":13924753,"released":"2020-04-30T21:01:52+08:00","url":"/pump-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"pump","hashes":{"sha256":"65da0c3df0b46ff5feb3f7883b883add4e0cb9abe0353efc6af3f82de7ce6faa","sha512":"a5c20216d836eccbfd206faf584cf0a12927f4ca778fd3bc413d7728772a9f399b42fa173a32b1f872b0b98c5c569fce2b92850ebbd10fd8bfbe97705c7b2f60"},"length":14490880,"released":"2020-04-29T01:04:00+08:00","url":"/pump-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"pump","hashes":{"sha256":"b540168e36190bc3486d23f8369e321df78391a7dd2231c62cd3925406181112","sha512":"0303611ae131c2cb3f687de621db7c94abba1bbbf076af9d339adedb45005ba485bf5a07366f828b669d0b0f0cd5566e89f1dc7d67bbe61343aff63190c1c367"},"length":14490886,"released":"2020-05-15T21:55:15+08:00","url":"/pump-v4.0.0-rc.2-li
nux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.pushgateway.json000066400000000000000000000036271505422223000247630ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"WxyCK9PZxyLs4iWR8SkqF9aCjaG3e6MNCmpF5l2ZOOF/HUTavRDNTEUsW6E9cIznDRypgMI3EjWUuncFFLXnj54xAtlrsZXMze++0UjgOVtLFZeNZqzfY6VU025+VNDDVjki5Y6hTH7D5y4+7RXNinpp7SAyZV+f5ds3l2Rm+xEZJL+9r7ahwaNa/gDI06sFTJ4Y0VNyXWtlSoTdVptqHNkZ4g9fLQN5H7bGHHDAJnOTy8cIQSvnAxMRKG4cHQT8g+mTl0bRfanR0O8IxP4d854YnmfH2fShwqnEhj8w/kl/1Xwfbkla0/cbGUW1gpttNCI2+k03s7t0oGwn4C/xyQ=="}],"signed":{"_type":"component","description":"Push acceptor for ephemeral and batch jobs","expires":"2031-10-21T08:19:55+08:00","id":"pushgateway","name":"pushgateway","platforms":{"darwin/amd64":{"v0.7.0":{"dependencies":null,"entry":"pushgateway/pushgateway","hashes":{"sha256":"23decc4c4b31e93d8e980734f5a8a7b1cc41bfcb6286f852653eb0e5396868ca","sha512":"e54166c9051935afa4520f57da4c96a11d4b1cf4e6b6ae3a94019fb870d2f9c028a8d7794e3d710a485794af67f36e496667b63151473a9b0afed2f2912751d3"},"length":6557720,"released":"2020-05-20T04:47:20+08:00","url":"/pushgateway-v0.7.0-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v0.7.0":{"dependencies":null,"entry":"pushgateway/pushgateway","hashes":{"sha256":"8b42c84666ff35964aa138ab66963888cf79eb5a9bcd242ea38db01259e84716","sha512":"9eea6e70be1beaa3c0c85627cf5d6e127472c4283778678b014bf7e6defec3f866c21dde6ea7cfc887a4018bba6197c399908f94893e29ece3087c1fda204357"},"length":6555421,"released":"2020-05-20T04:47:20+08:00","url":"/pushgateway-v0.7.0-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v0.7.0":{"dependencies":null,"entry":"pushgateway/pushgateway","hashes":{"sha256":"1b415bb473149327c27ac5b63a284b08592e7fa84d4c6116bef6711b402089cb","sha512":"d58897246ddd6062195d5b5edb7542b86d61fe38ff1a24aaa225a025b145ffd2b408890efa0bbc2cd522701f9566978b4a3e6456f22fa69bf7b2e98d2dde5747"
},"length":6081693,"released":"2020-05-20T04:47:20+08:00","url":"/pushgateway-v0.7.0-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.root.json000066400000000000000000000121451505422223000234000ustar00rootroot00000000000000{"signatures":[{"keyid":"8ddc33c926240004eff0863b81a2f6a4bbf8861bba3f6fb2d606c284cb9a1a48","sig":"izKXX6mSLGBzWqfpxVKba8JUIlwweEoaO2V2ocpDMLThtwal5AG3IseFLlOFhawh6hZMzdnr0zuP3vfxrh5KpsTVgAbHWkYuScdR/fdxk5aFNGEx5wiNs7f5ILEue8/hg3nLN/Q5TTEaziC3Edud9dqE/y0aZpF5vRfoGngaFclEqA9mF+jr2N1dW+YTXQsjZ2VGdlaRAJAiq/BtcCvLeII9ZhlOwoCFxGlmC532dt0PbCP8HULatv1W3g4MeSdXZAF0fhGmhsVS8dya27XP6xXAbaBbs/hTIlexB/kRaXAd70XMFGVXBlaQZ6MNUYMMxpuDHB5xCSU3QiSMxd4MsQ=="},{"keyid":"10b0d4973ca384ddaed975f2c0551aed1b3f0a36602e4df1ae9cb59e5409b0ad","sig":"RG/Poo7RJ/opEehhC9jHI8TZeBxE925KQ5V42SXE66l/FXEVsAKhsUhZRBLonBSkufbYWt59sx9i5EjR7QwRqUJcyXi8CA1R2dYrT1hSF3nVD53gp/dj9wyw4BZWLlVVeGxnAXGe5M2CC31xztOMxIZJ28TpGxS0OLUVCUOHXy1mKl2u8+BX0iLj9TBbKbAO0gp09muzE1Tl4HD5lPDlLscivUpoTC9aLVyPDv8ezTfXFcqZhW9lS9qnToJ6IuXdcRg/xLql6RmZ086ZG+iYUEb17MB0spakLkdsV4XoQO1lupK8T2QssgLpBYApJSjAmvLDm+9eGftm5eEOKXne9Q=="},{"keyid":"793feef223a92b4ff1a0c8259bfae2eef9fc5aa4a2f4a018fbd9e46542195a52","sig":"S4k3zqejwVoCr6/v69p8IhjvbL+6VyDt6oDqxn+LzY24u2Q35VGXOwmpPmhjmrvrZzpDUTri04ewR4s2aOSXJOJOXVLKUmcseQMLoxUqf2AiJ4QZQs4S8eCXt1+7VafHIjBl01B2cK8dLYKH09lcj5hS9TpmplqkdYjGYxIVLxMwaIReByIZmdPib33EDxjftNro56KEEIXTO3akWv3MPlc1FnRL0Lz/01bUUXFIwJZsFVcJHp9zPgnqJ+smkrV1Hsj8LnvhyiaAxlvh7fhKuiiKH87g51BPXyJ0BFjIkjEqitgqYiHu/245eZfLDlToDovEDzr7WPSHzP0ombUvvg=="}],"signed":{"_type":"root","expires":"2031-10-21T08:19:55+08:00","roles":{"index":{"keys":{"c7b9a2470b206ec002c02bf508b58df0f0a72474f27eae0e70a9836baa084f2d":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvk0fF7XTB2BFQv3+M5Gx\noqRn1WDv74OtvSNztn+xVhSa1QzMroFVIbFJfC5SriR1VbdiK4V///OBtuEACn9q\nTBlbAFQ7r41N+jogG0yT+26F3R08izs6KUd6RZiqHbn9Gn30dsrqkzWrxfuETLAJ\ndC2lKum8I7D/SvkX1GdpBI2t0h+5OQtNgaK1CgRMxSMYA8KNKvOs2ICQReKoLiG2\n/N7PGCzeXB+QczXgJcBs2kP1bAm3Zi6VJEJBkjfov+nM10SWBbbBPuhqeQzmSPmv\nnpg6m2m/T6jTdRqGGjmWm5xczOgabKo1Ums24WpcGwCZBFogeRnmiVDm7u8H3zpi\nsQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/index.json"},"root":{"keys":{"10b0d4973ca384ddaed975f2c0551aed1b3f0a36602e4df1ae9cb59e5409b0ad":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0eYvswxGngF9+xvvDoyH\nZl49iB5iWDsYjAEaLsXn1wrpMkQBqxEiJJDO92ANJ3hjYYPgYbfJIsEKSLwq4iLD\nRm16sIngDRm0nWf+pIdKSvZhu3/9WZ2WNsAyf4tQyB+fG7m/45X6Aajm8A/o8fDj\nHuY0OZRE1om/PUhd9U1XNeNYDnG8NIdVP56SJC3x/s1Vo0CM1910dpryT1MCst5l\ng7RcfNJT09MP0yN1EuEmbpcVM5uoZPS1nAYJxJerxzd0SI/onH1kFqmQQfYZDCKc\nPt0Oxkelw4AsA7siCW4Iqe9pJuWD5AkZCarVK8aCNdQAk+LMFITipHZV2Wtzd3de\nHwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"793feef223a92b4ff1a0c8259bfae2eef9fc5aa4a2f4a018fbd9e46542195a52":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxzr87ltVVJAa+GFjYsS7\nLuxyDyIhjRU3Lz9U7TfrjPI8V2znO+/pfz1D4ByWQK/KuT3STpoMci8K/YPC/vQQ\nO/Zg4RpsiZ4wS+HH3JpIqjaj9rAk2NOaFbK8ntNDtcXoeFjMT5yXTB+SGpvpPCRF\nG8/hnJVwNq6ouHzBc5iSs/yShSiRwADPeHgt09jg18zwbMYcfYWLjMVlxpFF/RpC\nUpYzs9XZ/exKOj4RbB/Ckl193r+Ve+UPIokEG5E/neVKyxa3dGcyglODLmZiSeE+\nBq2xTfTHi3ENYHpwAruBMvA46T3sryCJDEWjDj0EIhrMod7AiHCODQ6rGQSzcoKU\nvQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"8ddc33c926240004eff0863b81a2f6a4bbf8861bba3f6fb2d606c284cb9a1a48":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuqD5zVSWWUhcpZ4MgbfV\noq1RkQdY6hmoZXMwRrHTcru1tAcQHv+pqrkTU3P7T1mKeX9YMbG63GSBQJQuAxrF\nHXwKU6geEwSBsP2b4c6dZ8srPLwwklA9Ilr6x1gd74YgN5LskUNjLo6YWp0trG0c\neMbQ1fD8Ng7qNxH1m7uZaPOXIoVBMEQvveGPGqEGYFmYw9nk+yKJA4ldl7nEVUq2\n1otty9yLYAB5D6At7Ve1G/V9bL0P1lQzuTXnWWsSenCDb+NhrPaFiXaKcOtBfAIF\nLbXL7oOu004ThnVmv9H0kGr3IHh4dsHOzxsVvwV90ysMDMVwWaMH/l1aYogt5i+f\naQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":3,"url":"/root.json"},"snapshot":{"keys":{"c0bc219c07f04771557c54fac348c4d3a3122e715442d25021600d39274de064":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwFNV21CSUy8kBa93fn8T\n7kfR7WM4EekWQqHvGGsgYQ1cFiUp6YrLPS7zVSwAia1yriUtkFyZKN+g0cZOCSCq\nFWeKGR1wewwKb+5P/NGpzDzc4AlqeAs9CBjPKG75j47WJ+jG7q2JPhXiogZ9WJxn\nBe97U8G0sGnFHpby5CW10OEkQG1NltVruVXGk0fNtQw/tzfoXFNTiNO9rJfkSWAw\nkpiA44IWqv7UBGjD2aFhhd4Qe06o5cOp0WIyRAopTVfTzxvrmaqLLrOrNxQkdm0e\nY5c+/+IOS2iFn3xnEukpIQdnKJMRM18LK4wp/PYCroFkWf+itxN9qIU2tH6mnGHX\nEQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/snapshot.json"},"timestamp":{"keys":{"db75ccab60452c4fde6c7a42a70d91b26f24bd56ae315dbd1acdf2356664f1e0":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuMmedZUl11SjJiGL3kba\nxTrs3p7p3j5ZQ14KcRdPULVuUi/e72OtUQJsNzCu7Q2n2Zh99smFizWpJnFDfn2s\nv5l46lE2xzfPw6JFnkpCua8rBWPEMzhi68pJNKfOPp4cVk1Rbu0TA+NCHMAF3QSb\nux6+a+71jvqayC9At+pJiR/8Kf0fP+tsdmp9XSQ/hQ9ZLV7VuE+1cTP0yhC94urr\nFZxlOpfXhyHkc1pABvbddadr0JMCv+pgEGjIdP51Z9R3KPOXzEkJCZ7fs2/VB7ny\nJ4PffEAl7eKA2RPKrPjyjcMOT8q+ST6/Mbe6YBUJVZckM3DrZi4O8P0Q4Meb1w8g\npwIDAQAB\n-----END PUBLIC 
KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/timestamp.json"}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.tidb.json000066400000000000000000000623761505422223000233520ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"iBbyC1Hh1q7WBXsQp7YHwu0Cti88Ki7XdoAgaXP8phqZ2Quv5ZU/lK6JajRCxmxM6yvWwcQnsCb0oRMSUCbA10ZUemik3c0b7SpFHuDWLKRqbpjE6x+SRas/nDn0BtppFQruD8jXsEP6hbtT1cIzzvUGPFiNvXxlMC2PLgGbf0d0Jc/hGWOXBjT+arREpN92NShp/V4RgKbeaLTB8qpjq5QitR721Nj7dMGqBEYfJbXFs+oSZpP0LPf9IWwbH3GzNcTsFZBCdK+08AZLDMOCixrRLLktxbATE0vsrnScPptE5NvHV29PgnnmAaZ7U5aECS0vXNeu95EF6IhPMZEP6A=="}],"signed":{"_type":"component","description":"TiDB is an open source distributed HTAP database compatible with the MySQL protocol","expires":"2031-10-21T08:19:55+08:00","id":"tidb","name":"tidb","platforms":{"darwin/amd64":{"master":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5e5cc31f436703a0f641b713e444e2edf62843d5f919e8963700a22aabd3a355","sha512":"399ae8ce8c883502183304445aa9b055027a856edf576956d3ee0bef784be6018cb2021f5fcbfabed4d5fab4e5bd8a6889bcdf94eb56dd2dfcbbfbd094667cfb"},"length":34320392,"released":"2020-03-18T08:39:11.753360611+08:00","url":"/tidb-master-darwin-amd64.tar.gz","yanked":false},"v3.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9ef93384b98a9c695d1be15e58ffec4dc64ccd0ba2a6f3e5b80a44e0ee55de03","sha512":"9b7e2e07548e24e796332344a6137c4d839e9e75a42bfa4881ee4033558b7a9e3315b3f066472b80b12ef08d6ba8433d1509e38111abb29362bbbe2925206223"},"length":26234205,"released":"2020-04-16T16:58:06+08:00","url":"/tidb-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"61c3ec4864ec1c5551053c4247efcdd90e035bba809e10c943fe0acc89aea40f","sha512":"4fc6458d878c8c94658de5ec4afa6cdda1fcdac0a42b85d37989e6919b7e13012bcca59954384890f6086ed18de318fe02a3c6cdcd8a4bc816209260cf5905c7"},"le
ngth":26234250,"released":"2020-04-16T14:03:31+08:00","url":"/tidb-v3.0.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"664e6dd1fe7e08ff4cfbab732a623bfd1d490fcc17dbde6c60e211cd1e9e501c","sha512":"bb628d4ba89a0a7011a1ffa59eb11fa3bd7f27ae5349f367a0992412c6b03b9c091ef3378d05a064228cb40373f6bcef4dc3556538a144d254bcc867a3ec4e07"},"length":26302822,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"01eb225d187647bb6bcf5132ea579b6801569ab933bd5650125294d9828f9661","sha512":"9f5dbffea7cfc306848e51511cb418a8e9ea5e21ad8699157407e9403633ce39ee9899618d26f97eda2713b8aa0be231612f77e9e1ee4e87ed934c8a9a3cd9ad"},"length":27351570,"released":"2020-03-13T14:11:53.774527401+08:00","url":"/tidb-v3.0.10-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"94109b4a3fe079bbdc6a2d2020190b975ffcce4132589a4cc7f0f47d9c12377c","sha512":"fa872451b0d287356b61d94440a09aca6f7ee180a5e3ad92156806ebbe02e4fb91ac544cea7f57a6d0a224ac072cad1ded8aea9becffc3dd6103a91c5e08e19d"},"length":27362399,"released":"2020-04-17T01:09:20+08:00","url":"/tidb-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"775c0fbf9bc882a7177e05d6b81657c3c53dcf56233d21ad246d21cc31165bbb","sha512":"f8d5827e0484df406c8f42fabbff18f6b5c33795022504c0932937e70e7cec792f2c3cef90628f73ffabb1e6403634e343346a1bfae3446c3acce5b69e2b8c87"},"length":27399852,"released":"2020-04-17T01:16:04+08:00","url":"/tidb-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7a9d935f01d23d6a9393e13f5ca96a8558e529bb6eeb1641fc6402e06c746670","sha512":"d791b4fceb8224a0ab6ba52b6c042493c0824861888fc69189ff0e9576e63adbb9eca6050812cd0c60bfaee43aea40586f58b5059c9f34be5a9ede1abcc9c303"},"length":27399048,"re
leased":"2020-04-26T17:25:01+08:00","url":"/tidb-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"2551f0d2e13d5475770fb85257266c784efc22dc5ae7eb50bb3820eddfb87c4b","sha512":"7d0567a8ea11c7435cb93ff3de5e5f1f41457e743b4cf640eca96178c933ed905f9c61a7a419786cd9c5c3e6ad4dc59d5aec639e58d0a750a08c5a34e45d3fe3"},"length":27501728,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7baba9ddd32f201474152a3537c269e1d065c5967a9ceb2a287b8640a2027c8d","sha512":"632cf0b707dce863a3474fa046375f3bcdedca99434dfdba9277ab51f0856ea7f0b126003b4b8912ed9e027517220614504d599111f0dea76137b922cc2e8716"},"length":26394428,"released":"2020-04-16T23:55:11+08:00","url":"/tidb-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f3a2526156222d95917876e3fc2ba5a50968cc563cffb198e8f2d7ab909df85d","sha512":"fa1182b567d3e4ed7d20d3cde5489359f17536744a8b24d036dd2f60ed75b6631a6de44b6ee79a6ea69e15e1ee2c9af14890116a081e246be69d685ef5326dc8"},"length":26459795,"released":"2020-04-17T00:16:31+08:00","url":"/tidb-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1085fa01a2d7a5d20744cf385774e30a3a53737688a8a8153ad8d167267f998d","sha512":"719c835bbe95290541ca023e0a8a73118083cfd72ea351fa256e9478da10443a691c3cab2d3f1c820de38b57a7919585f53495629dc5a84f0ac5e74a43ea6f0e"},"length":26563703,"released":"2020-04-17T00:22:46+08:00","url":"/tidb-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e9f9fd19f1366a9d3313333e94b9f361abf913549c13f445acd5903b4e75d49b","sha512":"e2993b21e77f2fe1d87c25a0bd432f1998f39d6b2dcb78b678aa508c0f338fd44705e7bd28adf3af597cdc35872d78752857acb846002d3dae80cffd501b3a46"},"length":26600134,"released":"2020-04-17T00:29:45+08:
00","url":"/tidb-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"85656077b92da06e0a95f27635314275d5b75bfd08bb4f83676248938609551d","sha512":"527235e2cd392a2a3d27be6a3591ffe0ceddf28647861fa1a760bc360fae24e6c3e47a0e823d8bf7247721e52df8a9c4ba8ff1151928c82941b5c8a7824cb0c1"},"length":27124280,"released":"2020-04-17T00:39:33+08:00","url":"/tidb-v3.0.6-darwin-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"88fa1e2a9e9cc56450a1954997c0de988a799f7bcb33d1d028764b480806b91b","sha512":"b832e85bc59de8d0d4046986399a28db04c495aeab2ea5f5a6dd26bf36aa50d49fbc0238afbd10552e86c668c0048ba0cf4467827dfe5b4f32d93bb3e391c729"},"length":27119755,"released":"2020-04-17T00:46:32+08:00","url":"/tidb-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"ca07748d61f4b4a93eb2dc8ddd14e74fbf7875c0bb439002e8fa0a6fab038779","sha512":"8bd60421894e2bc59fa1a9a7907e4037a69f440cc5cdb4e00808404bcd9002cad04fc8d5d68a07dc9692f91dace644db2db76a6dadbd05564af1ae7cce67b931"},"length":27245990,"released":"2020-04-17T00:54:19+08:00","url":"/tidb-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"021df7ab08f6c56658e093e80f4bf1b58432a3a468699942652add363ff24996","sha512":"6c2af1f3f67db97c7eabdc71ce75bebe62f1dac1d881d74e452d22ae1b77de6c4192a214ed3f7a16a1275636a8d6e4b953528b601317b7d08265ef059b8d2976"},"length":27332386,"released":"2020-04-17T01:00:58+08:00","url":"/tidb-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"4b0c4d69c4e2699ccb5ccae89a81c08817cca106d37644ce261b4f7983e356f7","sha512":"86845acf140cffe808f24124e298db1467dfcd98df6abaf1607b4043ca4c37bf1eaa220e928bff76f9381b8d1929fa1a824aa47e35b482b80414024203ca5b13"},"length":28156482,"released":"2020-04-17T11:07:54+08:00","url":"/tidb-v3.1.0-darwin-amd6
4.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7154423e6bdf5fc2c689c7fc725bd7c3b9530a5dfb158e54f5da702129910f7e","sha512":"368e779252547dc6405bbea771cfb58b91c5bd14586ebc14967774b615cdf67297fc5b010b55d0aa77b4e111eafc76842e98f01dd7ab403564834926641731c4"},"length":27202791,"released":"2020-04-13T16:07:51+08:00","url":"/tidb-v3.1.0-beta-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"addf1322c81a79d1a316ae9779e7f143135e703e006f8e1ae7f9d0a32d66cb5e","sha512":"a6b274419dbcb0f9796cee257f29d000786e457466ee819b925012992c2921cbd66570f1705b69cc77f14d03660bc750c66b04472fbdfbfeb45ad4a93cd01f8c"},"length":27422699,"released":"2020-04-13T15:45:38+08:00","url":"/tidb-v3.1.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d5a15fbebd4379fa823c869dc4117d19ff889561bb03b19110c5e84bd0fb26c9","sha512":"e47b4787bfb66e02e9233013d302e72266318e09281140e8dde75f7c085215afa8299fdfa0dc2929360e99f25976da6a9a2ef6e7513e9648234ef9768a28ec45"},"length":28062180,"released":"2020-03-19T00:48:48.266468238+08:00","url":"/tidb-v3.1.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5d3d621009148f99d4ceefaa1f75e832d1191eea711e22a5370b360450b5d4a3","sha512":"5d04dedd689f9efdbc6ce776e9161b23ec6d671076cc44e09880b9244fa02c16c0b2489dabc1060c9a577374e72bfae7e786bb53bfaad198ca196ac6227a8c9d"},"length":28184931,"released":"2020-04-02T23:43:17.456327834+08:00","url":"/tidb-v3.1.0-rc-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d9d5739daa91392bae0ce17455a22c86cd1672645373e1eacee6fe46f732003e","sha512":"fdb81f2c2b491b669830dd358b219520f84ffd07e7a90c2fd1448573f8f540fdb700b302ddeffcae3553f4276345e4dec88b2106fc0f25aada30f4a8d79b86ea"},"length":28369009,"released":"2020-04-30T21:02:32+08:00","u
rl":"/tidb-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e0d904aeee1c0ae215dc363d28d043f21b6d8e52352608e31ab7044b924968ef","sha512":"e1be572e43d53920720b918ee532302165eb9994a2cf46371da7bfe590587988c41d3944add3278e041f7c5947c1fd17aece4a14fe4ead7a3c027ddb6afc9716"},"length":32614395,"released":"2020-03-13T12:43:55.508190493+08:00","url":"/tidb-v4.0.0-beta-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"6b9ea806250fe52c7c6db880eee10f83e8e9d0b7c55d281b401eb93527eff1ed","sha512":"b93b373b5eb9b437badab007011fafbbb37c80626be33a46392067352455e8cfeba9faeaaa11d9266502e6bd1553463a3b5cc7d053c427eb7528197f7fcfa3ef"},"length":33489884,"released":"2020-03-13T12:30:08.913759828+08:00","url":"/tidb-v4.0.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"cd6e91ff48be1c7ff849aa2c3cad2092923695a3c52ef782f602bdf355697d70","sha512":"14fff5efbef85a7635354eeb915346da67cbc0b4f4fbc94e77cff0236ce37a0dd201a0a557976d8b887243d54a4b6dfee219d0afe4912adbf9ff083747ad231a"},"length":34318099,"released":"2020-03-18T22:52:00.830626492+08:00","url":"/tidb-v4.0.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"82150a188c455475b55ffe60b3851d846d75d7d777e8dd13a1a18399f7a1d169","sha512":"c06feb70ea979bc637b40433362e870c277385e87e5ad6ae214e76bd326d6e18dd7d669817a1bc3cf074fc5297929030b726c7d5ad2116a4e6094c6bae529d0e"},"length":34458496,"released":"2020-04-17T01:22:03+08:00","url":"/tidb-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e88ac6fff667c0fbebb8a4a5805d9f174ed57ab172f44a417efef69ebca2b310","sha512":"f535fd63037c1dddec93c9e6c4bca0ed27243babdc06fa80943480abe68843e2af04f9193129345d9102e0604f7438adc12a6b4852b04e5861490a23b9cf8d6b"},"length":348260
86,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9c3d54eaf90e700e83fbe5354d980deb14fa4d080b93422b0b399efe8732df1b","sha512":"841f78400e06d77e855c56286081611e5861f8fbd9dee4d8408bd4ac7e4baf45c2c26b04053ab3230c8004ee7d2c08aa263c2df642711a2ba0298ff3900146d3"},"length":40461780,"released":"2020-05-15T21:54:51+08:00","url":"/tidb-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"master":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"22a165fe1706a5f0c44a781e97c6f9f68cf8a80248bfc5c4d5b45267942bcf04","sha512":"a388e4c70ce87b2e88bc2a89825c93d74702debe98e4ea9d30d388426cc0369d135f5ad52f47705eaa876542783af34b2283b62c723b0fe0c3a1dc6cfb34cc99"},"length":33939069,"released":"2020-03-18T08:39:11.753360611+08:00","url":"/tidb-master-linux-amd64.tar.gz","yanked":false},"v3.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c08a9b1211af6f6a2e6b3393e2c9bacb2e5485739da0942022fd9e3b14949fd3","sha512":"948878fcfd7fb616cc4dc84545187492984cbf31d53c41b9c761d37bdb01068efd5779aa89919bfab938d97fcfdeb0f7e405051993c67278636daa9bcf3f4521"},"length":26787208,"released":"2020-04-16T16:58:06+08:00","url":"/tidb-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"91758e462c216bd273c4afabcfeabc06c037d555a6e23f1a06ab58b3b9cd28c3","sha512":"871c714bd1daba19c6a3729649b3c5b9f2eb3d739f7decea759d7efa85a15f9c2565e1e9de20deb286823ce53ad37cb7941be717fa171917c02d847a655deff1"},"length":26191751,"released":"2020-04-16T14:03:31+08:00","url":"/tidb-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"22d928bddc6ca4ff9b2686e8f47811387c3fa14bf495f75930247a203dbcdc3e","sha512":"52d35886548aae5b09d1f62008611cd45585303a15c8788270df72c1db81fb9004c9f32b5ecfa3d1eb6b62c1f768781b3b2348979042b24c5a3c606e6556ed12"},"length":2626306
5,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"346ddb4078fadbed6bd59dac98c04f2d4b7134a54da0018c9a16deef7e9016be","sha512":"f0561c7a985c00dfdcc0e0f2743f8a91be206cf1bc9c89929409f2e8b1bf109862fc7274cd0734e76364fd02193296689ad71b7a47b59a3e5d16534b04249fef"},"length":27008746,"released":"2020-03-13T14:11:53.774527401+08:00","url":"/tidb-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"872e3853db36d8b663a165ddb61127d6053c9fa1bcdeeffc0710a4cb08b104c7","sha512":"dab455a657647a2dcbfeb026eda321bbacea47daa11e11d62d91f8b63fb7be495748d673efd377c0afd329806abe03016fd61272251493c6e235f46a98031e22"},"length":27019356,"released":"2020-04-17T01:09:20+08:00","url":"/tidb-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7f5b79e0512114d42a0141acc69cba848e7903bba26fd9984bea383739d62bd6","sha512":"954e5f4b67dc725aff4f0914c7ab8fc2a547069897fa9daca858443e11e6cbd17beb41b5dfc90f642174249fa5eb65ccecfd70c8858954f0859c47a58c457764"},"length":27056612,"released":"2020-04-17T01:16:04+08:00","url":"/tidb-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"367de505050520a530f2c5e8bd7b4e1b58305e6a4408c3617c9658248956de78","sha512":"0311fe99bea76e6e92cf06354b95a2adc3ecd83f9cd72db8d1eeebc5ba837e52339fbd1f3caad4efd9b24903615478355529b5434995b0ae7c4c13b4eb783140"},"length":27057866,"released":"2020-04-26T17:25:01+08:00","url":"/tidb-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9e4a8ed5aa19e0eee6124ee9f86c400687d641454b461df9419fc5005b83fae1","sha512":"08c0ce3b960211d03b4608c6f335fe58d16246cf58db5a14112722ec86a5f9fbb8d4c7178faf2e3c910400cbcfe1890707a10c825b95320ee3605739d8aae873"},"length":27153764,"released":"2020-05
-09T21:11:49+08:00","url":"/tidb-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"a95e40e6de75ff4b7b16021524dfb903bcd1fcddae9fbff743ec99ed8dc555cd","sha512":"531b8ce8a57ab48a555bdf1a3d132c8f07d26abb9e7966e4996deaebfe0c65e9ee0fd57b69ec145822f113306ccb34c9b9905b97b8e7567ff2e0eafdd17280e0"},"length":26356240,"released":"2020-04-16T23:55:11+08:00","url":"/tidb-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"97b81dcf86850abda384d20e3ec3bdc193161c679404936bf47b1227c47a5665","sha512":"7571db342c9cbcbd2961b4d1041bc86547b124c5e0b61d55225021017e7cf7179775d531689892c9cecbbf80cae6d4ae1ed4a621202c62d3b171aec8ae786b10"},"length":26411379,"released":"2020-04-17T00:16:31+08:00","url":"/tidb-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"be51916d9b98a6d74702bf4840156460b3143447375d65172a9ba6b3f63ff8a8","sha512":"5c12c8be41db4313f70124147240662a3e27eb9e15d60afaeb680e729fe321cc1f07f467ec9cccaac5e315950448154d7a3bc6c8082d25f1f066666bb73f4734"},"length":26761632,"released":"2020-04-17T00:22:46+08:00","url":"/tidb-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f4c2b92c0f1e87376504f6d32a595f79a1707912264aab5de88b2559798c1be6","sha512":"b7c3d2a0f3e6c49a105b6234aba7b5a376e927d7b49693e9b1e16cd8e6f6dae793c4027268932e1bf21e472b54c85092caeb5711eeda237a54be257c1176b7cd"},"length":26260883,"released":"2020-04-17T00:29:45+08:00","url":"/tidb-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c99c6365ed606b4ff3ad8b6cefd582b665884b6e5ad7d70e30d1011f2834d1bd","sha512":"86904b5e4048c6a501a759fe3b0fe8e4ada47f0de74852e41b84025a9c8b9d8fbd46853c63b46165eceebdd6e9bf2e244e083eaf6e12bd7fda6571a5b501472e"},"length":26778819,"released":"2020-04-17T00:39:33+08:00","url":"/tidb-v3.0.6
-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f5c897a48971ccaf74737452e68cd959633bef0627e0c61f11c4e02ef42e7a3f","sha512":"ed595bbb7cf7b0ffcad477479c520af0bf2d963fdae9ddd9c1cc59d4c3d2f7b597a85d81c429d23e0756c8c6f9159016e90c390f41a28c6a66604217d47fa2fb"},"length":26787172,"released":"2020-04-17T00:46:32+08:00","url":"/tidb-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"05a702b4d1a893f6f9e8e12777670f5a503eefb6c8c23ff0832b8e196ae67dc6","sha512":"874a094901ade201ec847fd11fa985d249c1804ab38804dbbe788635d7a022825222e1e95c9adf15ac5c66e24c354ad1c208eb9e525770ea122f815035c738da"},"length":26903200,"released":"2020-04-17T00:54:19+08:00","url":"/tidb-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"88db693f22fb52e26eefda54304c871ece536aaba3b2a14e56695b026add826d","sha512":"afdb954d2ff909f1569ed46551f73d1b38eedeadad68be862e85bcdca81a4ef2257c722f29a970913d379422d6a3c4b0e8a75796bbfa7ecce458fa0c11e03cfa"},"length":26997279,"released":"2020-04-17T01:00:58+08:00","url":"/tidb-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bf7d58ee1565daa4d24370e32adeca87edded2f497ab277c137fa3e0652b4101","sha512":"f75dbf6ad349ae96bd157fdc6c0932627c230eeeacffe0091e123f3453136423cdc41968717c0263c562b0491dcfe0b5e9989fe3e35e0cc751964f250e27d95c"},"length":27796291,"released":"2020-04-17T11:07:54+08:00","url":"/tidb-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c4f7414be060716ba0b841e0fb0075faa1fa15d99db246a5500a527fe338e7bc","sha512":"895f1edaf11cca877b6ba6ce35f19b48cdf5527fabe697a8c11290d33d464204642527eb26581853c8b89fa69dd1a3dfabf940bb60a7d9ae99a55e9c45c19257"},"length":26868228,"released":"2020-04-13T16:07:51+08:00","url":"/tidb-v3.1.0-beta-linux-amd64.tar.gz","yanked":
false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"47fd44d4c68195bec2bae4de912b2d6b3dc58cd74faa83f166d2939dcd2398d4","sha512":"7eb26d17f38e919b8970b687c1266c5cf3dec9db8ac7910597826fc45406d6aeec506bd7690309acc927c9b3c39d50400e92155a527464be2638428828047843"},"length":27077345,"released":"2020-04-13T15:45:38+08:00","url":"/tidb-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"cf2079cd886c3878db85a6484083d926f73562dcb93444e89efc9e444f534b8c","sha512":"97baba009d34171b882d46e16af1ad1524f56c2ef10cc3cc9fbeabe874367bad10f312701e751348ddc9bbc07a7957010fe932baab7cc558eb855c171ded9ec4"},"length":27717224,"released":"2020-03-19T00:48:48.266468238+08:00","url":"/tidb-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"88dca8e755713b9e5f21deab0eb0c7687f668ff48c1839ffa1d346b854721840","sha512":"5030bc57db3c0a002b9f955170f8a4f3233a334c27761b8c60c2b12280d128323ef6fbf5566ec851c7eeebc03c3c4d0dd0d2d8a750f3cd191a6a9227d5297416"},"length":27840449,"released":"2020-04-02T23:43:17.456327834+08:00","url":"/tidb-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bd7b1e1cbafac6b4875fef0360083dff74ee65cbcd72d2e4da7ea84c47f83f6c","sha512":"fbc8ce3048f7b09ecda269b891fb0acff320682711e59d11886674f22194cc5b9b2259ba4deac7d1beccf61a7cf29e3fb322b749eb41aa260f2970fd13852496"},"length":28015426,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7e6ebdebb1af2135119fc1cb34f1253b88f563f8b9e299e8d69a714bf5809043","sha512":"11a719f45b2dc4572aa1d158679d15a1ad1f02188790f016f78e7b62fb0038fa30bfec2ccf8a75b26ab11bbf572598652866b568052f24b6180b1f7f0ca8697b"},"length":32294889,"released":"2020-03-13T12:43:55.508190493+08:00","url":"/tidb-v4.0.0-
beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"a17b34e13d27f9a0bb6f4c3736162af1fbf6d423b9d6ddc402e53bd88e3feb0b","sha512":"5f3debdbf73ef3e0de0dbef2f79d2cdb2516df6fc03103239a4e649759ebfc583fd7f326f8793598c617b004de80087bb90553a237b40b028809c927c1b787c8"},"length":33118956,"released":"2020-03-13T12:30:08.913759828+08:00","url":"/tidb-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c454f5aa16f94b36b834d1bd7be9fef0767738c9ccc1273b834b6bab04f073b8","sha512":"e936487500fd1b646a0a8f46ba0afc5032dd983c45789af71f918d269329b7d7fcaa9859a17c43eddd0676f1bf6100fbb3977d1ba3e54105725c54219b2fc162"},"length":33940011,"released":"2020-03-18T22:52:00.830626492+08:00","url":"/tidb-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"25621f17e8e9a919d546f8c06e521e9d79e006b73e290d0cbd6596f1bb8ed633","sha512":"80715580251cfb738e6ccd1a23abc3be3a2c1a857d633822fa5784c7962ec9566c4f90f4d545a2f40e5d70bbee206f4e768e299c92867c2d8c7e290ad602a406"},"length":34051221,"released":"2020-04-17T01:22:03+08:00","url":"/tidb-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"2d5fab2bcc8793d85447ad2769eac8fca58d25de685039296e30cee0d0798137","sha512":"71ebc639cb23bd9671696ed43c974be6ab74781cf6543e5f3ff35ae69fac882557b6b49f8df8c7ee7cb5abeaef28b71f8edbb02d921a1aa2af6d58a33fc36b6d"},"length":34431514,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0bcd84a48071c4059519e64e743f40e6435670db0c4d9050f11b48136543a6ff","sha512":"f36c63ca590b624a0aea518f5141624b13c6d36b20f5ab0c0c973723cd481fee51753aa1b538a7fc1a81a3fb409830ed98576f0eb4d13b1bbcf31ce5eed67596"},"length":40116238,"released":"2020-05-15T21
:54:51+08:00","url":"/tidb-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"a90257c97d3c2ad4591899bee76328d2ca7db5654f55a39f5258bde1ddd025bc","sha512":"443dd0b778d2ac455687db8a891b88523061e86d8d7ea5c96f15bfacd8022a83565bac6af9e4f1cd99843f209bb08aee57b3e16cfe1b0321fe46286ccc620109"},"length":24791080,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0d303763b988d9262f5c7a750212b61786c56234823bde988922c1a205cdef13","sha512":"1db89fc07db4258aea659d832eb642561f59cb71de3eee14a52422889bf6c0ee03c99b27d7ffad20251b3267a680cc487f814f887e5a28e63ee69cc9f3ac9e6c"},"length":25904695,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"35209970a3014f3d655c220acf6259b2fe16c28f5efdfb61150681070e736a5e","sha512":"5149ee0260dd877fdd13821e243ddc8fc156475ca4a3fe0c04b066836c38170cff3f2c1611fbffde920e6d9833718a8bd174258073e1762ec7a1238c1c7ea77d"},"length":26595507,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1964979c80f03e20dda9c6e3b5abbc77fe8345c8a8b4bb2c15aaef92a72f2136","sha512":"218cb429a237e3d6d9e2dd7bc43b605a0dad5bae5a2a1cc0af3e8f7336b85ea54502a5729e8034c540c9c6b3eca97dc49c40124c106ea0c15335d2cdb2bc6fc9"},"length":32626370,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"63297933955cdfb166073722b4d2c1f95191f36e25dae977223889c3123f71c3","sha512":"56e1b8d267e658d4b58c6695976df01836386070cf5acc23cbc566f0e0102143381b85569f744c735eeddba9d7272243849e8e90d484d644347fa1b0a86bef91"},"length":38034242,"released":"2020-05-15T21:5
4:51+08:00","url":"/tidb-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.tiflash.json000066400000000000000000000075101505422223000240470ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"QmCB7tKFMUNovrFuawCZNHQSQkUaZew5TOTI/nPbuLP05rU/VPmO/+DJ9PE3o2hytHqNmKqTYSTqNpRUajUsMYycyA2ANabVo4YuN+myuzZFf4NYxaXyZ3HIdNghKY2rE/K40R7TGdF/lydV2WOvg61vBoA0XQYz+VAMj2HC6wjm1CY64X33Wq+PQXDvAB4M+AWTu62EaOGlqx4qjsa1l4wqvqHh0LSH7WLGg2VRitoEqp7Y1k3xzR0DJCqyS/6LG2OWvsM1J5XOD5Pf7dtukTBaME3kqvRkhq41bl99ijE21j7wINxVz8Sm9oivqMIxtlxhMVM8lPSglL7if2tL6g=="}],"signed":{"_type":"component","description":"The TiFlash Columnar Storage Engine","expires":"2031-10-21T08:19:55+08:00","id":"tiflash","name":"tiflash","platforms":{"linux/amd64":{"v3.1.0":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"3fd931017f33b7e658afc52816a8e43d9a9fcbf8efac96299807f4decfe4bcee","sha512":"2472ef6507834378c2406cf2e5662f208f4a22c0b09f02075151cc5a777e37187c2e5045f69c8ded1c4e0edc583db979d373a75b2b6f2d63f5dcc5f5c36ba200"},"length":423897982,"released":"2020-04-17T11:37:41+08:00","url":"/tiflash-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"7b4052d4ad44b2882c5ac1155b1815ead40a4606202b135660907db748e283e5","sha512":"ef3580266b92db041f9cc60354d44c3b79fd28c9238aa6e265e7e7cadac532839a01724c226c3a5c8625f5717643ad1c764d76cd878586039a072f3fe40ebf31"},"length":118,"released":"2020-04-13T15:51:10+08:00","url":"/tiflash-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"7c2b1f29961a3f8d846d7798ac62c554a079edeab45032e81a2917253168296c","sha512":"767f4b949bf5a3eaff934f6550f5603661ae060fd29cd2ffe2ed417c4256747b6dd3e76468e398ff31ccdb5640b7039d83beb38c97a65642b753c774874d381e"},"length":118,"released":"
2020-04-09T21:27:19+08:00","url":"/tiflash-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"bf2d690c133b08f1e99f4a64c6d8e362d1554694c6096af5e2ace8f2e304e9a2","sha512":"186f5d6345110cc3e23822ed6b2d8947656a295b82c69b4864225b004cdd1bb2eea860b41b3b0e02a3a645c42c395f3879be7908a180e1d8db9dc13ff07ed5a4"},"length":414926861,"released":"2020-04-24T23:23:21+08:00","url":"/tiflash-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"91134a7560d8a8a2018e8896ddc29e92d1f8f12f172f85d84a10313dd1fb83d6","sha512":"d27c31023d93ebec3544b5dae65b4329c2c795499dd7204f5373e3b89dfb4dfd7b2a811bdb562529f41c13e8fbfa5f82b313f940599eff4b31bf620cece15778"},"length":437892838,"released":"2020-04-30T21:17:58+08:00","url":"/tiflash-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"8d53e80eb4fad6dcb1af3c424c3e6f6cdef37839cf4934cb155b887abae22fdb","sha512":"9c87a3f9d9e135245d5d339ae097523baec9c7b77c2d604932ebb26d42d3a5329bd7922ed2d9c09773e0cd183251ead9b42da5decf2488b240557b83f2097651"},"length":419976272,"released":"2020-04-17T01:45:28+08:00","url":"/tiflash-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"f1a6765262b935cdce99dc723c7ee04d8479dd8c1e627ebdbd2ca830c23abf90","sha512":"9ce0838259cae134b8ab122972e1e7f1c53d466debfbdd9bd2053b074991ed653caf98923ef31f7c032e4406940d438c19a3390b09ba59c06ff408fc94aa3b42"},"length":444871635,"released":"2020-04-29T00:13:06+08:00","url":"/tiflash-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tiflash/tiflash","hashes":{"sha256":"e6df63690a28320b03a0574d0971e097a5c29977bf2d56268e46ddc9b92314e5","sha512":"6db9c8ede06d27ae99ca82a0ad278da02ce932b08a792b8f912bfd20cc761242d22eab1bf3f0f7b747a4ca9b9c408f3d5ad5d36ea318c7b22b10447c97343e4d
"},"length":446026205,"released":"2020-05-15T22:05:35+08:00","url":"/tiflash-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/1.tikv.json000066400000000000000000000615671505422223000234060ustar00rootroot00000000000000{"signatures":[{"keyid":"ee1c9f923d347de20953663495e5af77915ae0a3962c1e3f120b7582b8e17598","sig":"AsKGSJDkAoIcQQSPRepv6TDEom0td9XZrXaNwlAves+gwjbKoCmX0dVNa8uC194hXFE9ZEXWRv623cVqEF7FQLzVjar8szDa+K8pMc1Or4lWXoV5+dwOt+b2qfiuLY5SI6m6uNTxAE4Byh62641w5v3Px6Cx6jXUVxahENvQf5ZKnMJW6Ul6N9VRp9W9w6kimnX3DzkWfTdIbcaavwGrXMXfP4xkfL+jlofl5y/acASAZRWnhFWnIu5J7E/LV+P+ChuLam/qhfAmQHk2Ajz34lgRPv/OQWRhRbzPKuS0A0OSFA6O091VCRS3JBqcBnk1hOEfp49H4KCK8Jju2yb5jA=="}],"signed":{"_type":"component","description":"Distributed transactional key-value database, originally created to complement TiDB","expires":"2031-10-21T08:19:55+08:00","id":"tikv","name":"tikv","platforms":{"darwin/amd64":{"master":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"53cb3dd6f7494f222332db6e4ef7b1ebd79961d0a23aed045f45911acebc6c3d","sha512":"f5c1479c8df6967b6171705b04ced42e5884eca3995d929e3a60137f44fcd53a8375dd6e13309052bad23fce26f9bbf60f51f996341b86f09cec9a83329a068b"},"length":15100635,"released":"2020-03-18T08:39:18.893151348+08:00","url":"/tikv-master-darwin-amd64.tar.gz","yanked":false},"v3.0":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"8d5316f8c8d48b01ec78576b9cbbef7fc4b94b813f5f636471080b7f405b4b3e","sha512":"a8ea91bf71c4abb1ce3448075f150ab07f480468b3b6760ea25ae515eabb63977856a7b9cb1fc808c934a20c75a2626f66c886676b3406c7bb343ac3e5d40764"},"length":10144877,"released":"2020-04-16T16:58:22+08:00","url":"/tikv-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"8c03e0334c70d5ec8d9fdab83268901b3c72bd92f39bbcf763aac5eef442d5e7","sha512":"e5d6c69c7b8cbf3fa40852f877e721fcf001437d9fb894dbc3233424fc326a2fc9fa46654d131631936f9f9e725
a2068b6077d441ba23e99785f791d1849b019"},"length":10144885,"released":"2020-04-16T14:03:48+08:00","url":"/tikv-v3.0.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"975c55054fd0c36708d904dea6535f66c925d4c69795be9c2ee7645d4e6c38e5","sha512":"ea02eb8942be560d51b03f5f87f31985bfd540c357db579dfd0a53861fe6a6d989e9f0f95922db9b80a235e8a6e60612172f6f5c8c7860145ac41b65368f28c7"},"length":10064516,"released":"2020-04-27T19:38:43+08:00","url":"/tikv-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"340e8a66254696a136ce23d30eeb2c07fc3e1957deeeba931a8f269c389e03b9","sha512":"c2e7caccb78ea08fe1c43c3a2b61a5437fe75eba8616a2d1793b80dacf6a525f4be71dd6ce4c9aac50f4a6e2e5b08de79ff1e127b70a57c70fbc2d8b4f5ea23d"},"length":10218829,"released":"2020-03-13T14:12:07.829241447+08:00","url":"/tikv-v3.0.10-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"69bbdf6972cf3e02ba905976beb69be0602487560d7833661791460154855da6","sha512":"4d83f3ae1c443ca1fd8c63e44c716eab8c93c89725d5cce07ff593467cbef3af62b365ed57f6d926bc0e7e619ba4ac3972b0c7cb63b6c1f57c01830b6d13ca3e"},"length":10222921,"released":"2020-04-17T01:09:27+08:00","url":"/tikv-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"187c5fa1f8dd99598de60a719a31c5baaa022595f1f5ff25bea97dfd0f8282cd","sha512":"d4a7adfd1b2a213691e1224e38b1ee0f736062e08684da3b1b670f466001404510668fd1a647bbcee5d9134fd444e2e756ee1d97095946510d874c140f1c769a"},"length":10246007,"released":"2020-04-17T01:16:11+08:00","url":"/tikv-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"e303f30599b4093dc97b3a29700ef7c3142ecb493dcdfccc9b7e9d1e85ebdad3","sha512":"5b8a5aea3c0bcd4d463a24457160e4415d7096a6656676595de48833a3c497ec080b75c7a4956b193cd6fca33c8c5ac48fb87d2f32ca4
210b4400de231b86f9e"},"length":10227706,"released":"2020-04-26T17:25:23+08:00","url":"/tikv-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"a07593fb5e26fa1e09c2241370d09ac0fba80efa5447c3084b38c907fc2725ce","sha512":"adfcf7314bc80cdb4d78365a4ee80789a3935f64b094c06c34b4146b6c94eafd7e313bca0c54d6e135d6af61a197074bebe7ecc05b19a55ff7aeff66b1cf6f89"},"length":10244768,"released":"2020-05-09T21:11:55+08:00","url":"/tikv-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"c31ff7b00701486028c502a17f10e056c6ac85f6a8a8e93e6bc47b4a94afcedf","sha512":"3f80248cdfa740b0b308365f94a775a1e9cfd33542ce08254a21cd6160b556a524294da2322aa87f360b8e7ec26afe5775e0f1f0950272cc8bd8a3b6e273233a"},"length":10092443,"released":"2020-04-16T23:55:17+08:00","url":"/tikv-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"8c638342bbabc88c1bb60907e809eaa449237199336dcea7712d5d25b0b5cbab","sha512":"265e5da9f98323ca510267f4e00217214617c6d09f5673a478463a1db8314a3fd71352f18f485e2ba97654037e3ddfaae7b1d3ee7dc455505faacb96009d74eb"},"length":10092470,"released":"2020-04-17T00:16:38+08:00","url":"/tikv-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"6935386699d6997999833a21dd40d93f7e11fe7beed449eb45b77315c5677669","sha512":"254b967fd1696ad1ad401872cd0eaf4946d17d2f3a7ec0c2757a22baf3efcca7856db9132dadd9215c94a360460adc9e21d0afcc1ee4c2f159794aabff7c12c2"},"length":10131033,"released":"2020-04-17T00:22:52+08:00","url":"/tikv-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"6baf43aa8afdbb125974b102ba30d9d283b5f78f6eed3626f1d0b6a6b76aacc2","sha512":"96466cdde3d1614ba031c188627d2772f7dfeb754a9084910f852152c1efd3d48c23509feee7ea9871d20bfefa48d21ef3fea04b930297fee1087cd040a2f53b"},"length":1
0177473,"released":"2020-04-17T00:29:51+08:00","url":"/tikv-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"2505258c5ebb87793aee864014ac27deee2bc9d3133f10c08691cb70785ec0bd","sha512":"c868fa01c17db502c765303156838fdc2ec6eaa7a468502abb7bc028264e38506e3013f8bba041b49accc93723007be3dc27ed3802050c5bb2568f2242ff3ca4"},"length":10203498,"released":"2020-04-17T00:39:39+08:00","url":"/tikv-v3.0.6-darwin-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"557f78f441fbcd83d72aa594e3a025afc6944bbf9ef4ef5e5f147c5407f2cf2c","sha512":"cdacac875cf2b343b25981225142ccbedf5b301a2dae6786bd009a7c3c568913111fbc7301ec2f7e12f61cc1fbb0f9dc72e3fdf3caf6b992c36972bad02089d4"},"length":10207789,"released":"2020-04-17T00:46:38+08:00","url":"/tikv-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"a9b81d44262c84dfae8220a9017879e450295108caedfb934e48c498c229667f","sha512":"0cf47711030eb7fef2754a6cea4e63abf62aee20066d2d987c45930cdde7011500bf48be9ff0a45b7efe15428c20731c6dff4c762f10af5046f50ce03fdb4999"},"length":10207577,"released":"2020-04-17T00:54:25+08:00","url":"/tikv-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"42b399d90f1a2ea3db1920467557e3205cd6bd0bfcf1f776d4205687d759dcfd","sha512":"2627d40c6f242b34a53b31a0ed46ab5fb64b1818fb75d148db1cfd4ce2f30087bfec18ae2ab6416fddb3c2e679afefc221a8952309fccf2d4d89b8d3edacc117"},"length":10210047,"released":"2020-04-17T01:01:04+08:00","url":"/tikv-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"1fc074b537224412047448697e7df127a14e0d4f132543e9aa143b9968c18f32","sha512":"c48658ed6d3399ae30769efce5f59cfdedc5acd446e8edbd4bbd763a08f7efcdafdfd748e4f858da06418b29e57eb6f38085184765285710ae4f2f1292bf3b0f"},"length":12655928,"released":"2020-04-17T11:0
9:36+08:00","url":"/tikv-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"5b7d0830d79911f3c7f9150fb00dce2f0b4146fbf94b7048db48c2c31df813e6","sha512":"55f97209fb4e055ecb5c01c7c8cc5a4f0626b1be86d9688172742454746bcd172664096f5ce30de1bbb5c24ce0c1b8ee11e136b03ddebc936ddd768ada32ecb0"},"length":11167007,"released":"2020-04-13T16:08:17+08:00","url":"/tikv-v3.1.0-beta-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"34e8d61ecfa23c65f257769e9db822aae77ba235d5b1850e810070d0f97f1a5f","sha512":"fd71d15625e627846782fdfe8174832d4635930ffcc856c50a7c8e1fdf46e544e433ed71d2cdb3a73f5b621a3f89d7feb339dbdf9a4659b6ccd05b6d326a1f78"},"length":11145226,"released":"2020-04-13T15:45:45+08:00","url":"/tikv-v3.1.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"38692650988e32044590e67b23b9a108ff9dbfad0d01436aed9d7995622c01e5","sha512":"ed201e83659e0f54dd634f3b5bce3a601ab0989dfacc00f0ca3c3caa4ac686f8ce8396b581d8fd3da92948190ea891f76a069d398da7a1e71a16e1aac3f738f5"},"length":11896912,"released":"2020-03-19T00:48:53.820661874+08:00","url":"/tikv-v3.1.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"3d180ff9bbb49f9d23071b03389aaccd059d77046ecc6e6e71d14d14c9640af8","sha512":"c0c68ce93b6d99317bce3b4e5a2db55c7ee8dff562f83a5afa79d44d205c47344c2c4f31750542f9e47358ab990ba2ddd20d54f9d1c3fef6774c44949b4db25e"},"length":12653731,"released":"2020-04-02T23:43:30.216616194+08:00","url":"/tikv-v3.1.0-rc-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"40962009d3f85f2b76a22753517ef2715b2539d5215a224fe23f58f4fdd687be","sha512":"062841f19948c4032578608feb064894be6690ec38dc386b28d93a22f21f512d3a9440ba9c6cc8f81440d4a15baaac13a158555ed6b91ff10d6824b2878b9ce9"},"length":1270106
0,"released":"2020-04-30T21:02:40+08:00","url":"/tikv-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"c5c57f3b28324a13cd58088e6a3b5cc0f21d8f4e313bc3a545c51bd02aa45381","sha512":"27843974c3b33190c41a6811b840861d23e65607a840f09503f1c253b4f24a655ab8b06743b91b78d0309c43cb0066b7109140eabb0c8e4f0af94c599b2c420d"},"length":13247709,"released":"2020-03-13T12:44:02.910626468+08:00","url":"/tikv-v4.0.0-beta-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"f7ce5cd4c1fcfa4bac623554859de28eed8e3a9d422d8a190ee9c83a2d4b35be","sha512":"3b709677f012053844682e275369a9abd9f5108222eda4a0d40491d5f774801c0089040268f6de40cdb483b5efa394e40a04243546dbd0d5637fbeccab291f82"},"length":14446759,"released":"2020-03-13T12:30:15.605692554+08:00","url":"/tikv-v4.0.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"431287e165df9bc2b36fe1a2b61d7eb1c6703e9dd521b3376a54ded28e927b44","sha512":"3af7651a2230397638886a8eaa0fc15d6f026d086d4166ed0e07f972431f837e7768220dad4dedcb889c1ddf6ae6e0e5c81feaa4783e405ad679410765e5bae9"},"length":14562608,"released":"2020-03-18T22:52:07.17976717+08:00","url":"/tikv-v4.0.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"71dfb1f972e58839200569cd056e1d2e58079a0e875ec0898716af48bff7e01a","sha512":"c1624c832900f8ff5fdfce8b8732acb08d4e25cc50055447b5bdef967f1676821cf779355ffb6d0c5f6965631d0120234e8b471c0c293c2d0909eafd4956dbbc"},"length":15452127,"released":"2020-04-17T01:22:11+08:00","url":"/tikv-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"3b61b8cfe548f4817876777edb8924d98ca42412eee5eea5265a1dffda9fd888","sha512":"0f4dc285a63291b66ee681338426cfaadad9287fa3278df467fa2a583183518a152d8246d4e0f80866ce66c442233470a22a1692
4f1b4c45e452746f2c71a42c"},"length":15869608,"released":"2020-04-29T01:03:39+08:00","url":"/tikv-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"6146dcd965f219a04c5834e0316998b331023955448b9480f692e4a13ce50c3b","sha512":"7881f79fcee5de7e75a576a86af605ccc46019631268be3596bfc202ecdba6171386a40ebceee81741d6b3eec79bbca61456f7b43fbe3a8ddfc152332b9fc497"},"length":16069760,"released":"2020-05-15T21:54:59+08:00","url":"/tikv-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"3a35b5ae35bbae448ac4e245fc8e001103d848d02c9c0828faeb09bcf5d09296","sha512":"25118a433fff430a8670b748ac8efe6bb3cb3c6e3fc4598c89e5cf186f6a31aa3266634aa4c98c82f5df7b86446f121cb2e098170eebe2194e85520f8854414e"},"length":94432808,"released":"2020-04-16T16:58:22+08:00","url":"/tikv-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"814206bd544f25a44b0a81d861399fbdafc6a4915e3c4cfb7e46f685241ff00c","sha512":"5ab321d5d7e696b12473ca4a7e8cf8b3051ef5cab714bcc2807c348e34059bcf05ab282063636852f9409b3b8705e3b2f722fe9df3d7e480dd134e0abd85d0be"},"length":91842416,"released":"2020-04-16T14:03:48+08:00","url":"/tikv-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"2fdd9cab13baa58309cafafece541474165fb4cdcd6a240d449c34ccdb51614e","sha512":"683638c63bcb8e7a0d63bac0d57cc07e1f4a4d6d00e48e5866a2f82a238ebed83a85c9640be2e62b8449449a7b3fb0878d6e6150bde7d5c65b963cd5bd5ff435"},"length":91699106,"released":"2020-04-27T19:38:43+08:00","url":"/tikv-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"59a33f79bdb3bf84fbc31ef4da56d53308743d330c7aed835b0044c93ede88e4","sha512":"11caf148708671d33cb1eda1efd60295d8d5dab634a4e89765531a8f0c4a42b1eca361b0bd1afd0789b195e2e5f1156e6ae7f9fe3955cfabe4
4fff35a1729150"},"length":94547520,"released":"2020-03-13T14:12:07.829241447+08:00","url":"/tikv-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"8dcc3a1db50c1f1c01bcef659965a57834d43f94062444ac3fd904681218fc68","sha512":"ad4bc0b1a509bf46b9775a5d2b426d1b8333ef58f6c41bd1d699308859f67882248f7fa912463077e312a20a609e30ce1198446f6932862b0914f7c272338753"},"length":94515336,"released":"2020-04-17T01:09:27+08:00","url":"/tikv-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"559cba48834fc0d0c0e78780b499ffca8ecb468badbd010449f988581385b3a5","sha512":"701d718c2f2ba4f394c4b84088f6a9858faa6276d1267da5bc8bcdf21fe44f7760660afd924716fd19581517c4d1529222531742a73cff75b8c8273347c79309"},"length":94545420,"released":"2020-04-17T01:16:11+08:00","url":"/tikv-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"9af718d9b348969775d5e37c9a0f61f81b9fcd122ff6101dc1b1972cff578c2c","sha512":"d876f38d3e9a34fa85a79dd95ea4e3ecdcdb17bf826937994ee66796af1429299f0c97bda4426c8c2784305e84156b7d15c2183957b69b770bed707f365b0430"},"length":94543221,"released":"2020-04-26T17:25:23+08:00","url":"/tikv-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"76a8bfc4b93b6aeb119fa6c771d7fa5e2f5e67f496e5a91e2872a3654a979b18","sha512":"3324298cdc43307d8094e99e3e670ea3a32609411f090b22614beb1a571da5f4743c9043710ec710b6e01522b59402a8d5b5bf2b921c204b4f6ba00bb46345b2"},"length":94534843,"released":"2020-05-09T21:11:55+08:00","url":"/tikv-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"a1595b632828438ee5c53f00c2f1b65e0b11abfe130558c601842354359ed0c1","sha512":"239b272851012997c12d6f75328869ec3ddad3e4eb73297232c12b9d2e0d0f6be3c4900f03f45a2f6c68c39b78decb5e46abd4f852424998f90c80ea82420efc"},"len
gth":91910995,"released":"2020-04-16T23:55:17+08:00","url":"/tikv-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"5124011309fcf8be4ff5749c6c0e54bb3ff6249bdfdc4424697bae93ee211757","sha512":"9f19f84277c22247215a8fc2650c8777a77f7ee77a9fd73051ad7a1a795bed87e4282eaefd230bdd34d55055baa2e06bc14c611cb74c82f8578fb38a7c504821"},"length":92047535,"released":"2020-04-17T00:16:38+08:00","url":"/tikv-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"8aec0863a256d4cf36929aa19b0c4df71ff668b554e57489914076c072a7164f","sha512":"8dcc9e05c5f0d5fac6bb7886f7320dbcb9fa552b9d63781c739d14732b47afea085997acc2af9c93c85288805fba5341fc356e87995c4b826aa606dc216eb30b"},"length":92643098,"released":"2020-04-17T00:22:52+08:00","url":"/tikv-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"ccc00ab7994ce4fb3a39862db168094fd8a0fcca38b7b940e13888e1e5e889f6","sha512":"d247b3cee5ab2374ee0271b0af57da0fdefb343deafecb51e9aca41384267167c6fe5d811270944c7cf5eee21207241f3b6a8a7d13011df98b860e115b52b5d4"},"length":93040324,"released":"2020-04-17T00:29:51+08:00","url":"/tikv-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"c955a2a29adbb48f4ce66ebd77b8191a97f27a698117476a925078aa576e8ae7","sha512":"cc149a5696b1009c3ec6adf4038f86c7165831ae6b70ce17b295623bce34dc338ffe5b0785d9cd895ab9689942941587074d408f70704b79fb512b639182d108"},"length":94484489,"released":"2020-04-17T00:39:39+08:00","url":"/tikv-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"b9b165e0f7c30239dbdde0e5e31bb23827caeeb2a7ffeeb1b550b8519f5f38ef","sha512":"2a4f88fde7fb2295fb422bee18093178a9c04609e301f60181c207ab06ac11347b89e4a01e9dc9600939669d7d3002948959be0de8c1a7fa8648b0179924ae93"},"length":94432808,"released":"2020-04-17T00:
46:38+08:00","url":"/tikv-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"fe45d98b12d0ce7d2d58cd30a5d6fa1ecea726dcd2627fd44b2480ca4f9b820b","sha512":"36b4ea50e72152a26d57e77152c5522426e3b0089a897bba7e9aa58cbe7e9da174ba4eea799abbca203eadaefa21049571ba4b6db591a1fd8024f1edaf5b0d68"},"length":94441522,"released":"2020-04-17T00:54:25+08:00","url":"/tikv-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"f3328dfd223f2f26a6d3e0f36b37c9563a804e7b2fdd681f539ebe31158a8e0a","sha512":"f98ec21145a32b52f21572c0ab83eafd5432218bd2bbed8a39f2616e8101ed6b35104fda8fe8245bc40bd37bf8231cd93da3d56c07167fdba165fd97b0339abf"},"length":94480072,"released":"2020-04-17T01:01:04+08:00","url":"/tikv-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"0bd0f83505a6cc27f7b8fe75ee8e02a25d5a5c1fc8932ad69f3effd8ff3d7592","sha512":"7f0ee0fed813c44053886f662ee02869480b19b21685afd8f890fdfd5a5ea61aecee5e367fff34ecdba18baf3d8609471e669fb7337481a169f0c8f999c40845"},"length":116229416,"released":"2020-04-17T11:09:36+08:00","url":"/tikv-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"74ba640c4cf1e840381ec628211697e86caac8ade759f636542b46222efc7153","sha512":"f593f7b94d2f793cc50a06201dac8f8b3a2457e96ecdccd1650c7877d3c4b8b788b64b4eb3a3d5f71034c4039faae6d585b4b870f6656823705cc4227271ccc9"},"length":97973463,"released":"2020-04-13T16:08:17+08:00","url":"/tikv-v3.1.0-beta-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"e50e7e21e3d6f91b5fd7f4db0e6dad9ca360714ec14ab90da873c5aecde18f5b","sha512":"3f74b3f468328564efa7b860db7ecb878139353ae9cf344ffce927adeb0ce10e94b8180bea79e263b84c62922846add1b9dbd7c7fea3acf2a6b3b373b8f92099"},"length":98009995,"released":"2020-04-13T15:45:45+08:00","url":"/t
ikv-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"5d635ac4ca20747fdf0f8fe198aea98e3fa59bd0ddfec160b609489dfb5a6297","sha512":"508c3982453af5be99d12fae061f183a6aaa995dcb2e7d4de762ef3bffb2029f4bff4c0e94573070157e6b4699a7121a29e8365e467125498310b60bae6e2716"},"length":107973917,"released":"2020-03-19T00:48:53.820661874+08:00","url":"/tikv-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"4ac4859381c01ee85eb2f4d73f0fb35ab119820ea4a5e01defc1a74d95552084","sha512":"162a4a45fd9a59836bc396f9ad95da0f5ff0e4026b3bb3b095fd78ffca600c8359de96b0fa274512f7abfb685bd301a666b10fc885b30f1e852ec65e1d4489a0"},"length":116235199,"released":"2020-04-02T23:43:30.216616194+08:00","url":"/tikv-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"5a8b91a6bcd7c4a29dfe36d56d8a51e93fa319011eaa9eb50911f75fcdf6fe2a","sha512":"7e3659208429bc07234427a5951b0b1b39e477b77eb3bfe49dfec02a87e1d96c5edae67f3166a7a91d0a5636b105049c7e9fb636e469c058ccf04ddade8105d5"},"length":116354735,"released":"2020-04-30T21:02:40+08:00","url":"/tikv-v3.1.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"fe395931424c220badf76262782215a2fe46f65fbe9f9eda56c83a9abab67e12","sha512":"9c40cf1b3bfc98ca8af8bbcf8499f47be817e8140438a3b749282768afb019f33c1ea791116187bc6ca4c63190cff784062afe528b12b4b7ca8f00545bd95021"},"length":118500039,"released":"2020-03-13T12:44:02.910626468+08:00","url":"/tikv-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"9ea7b647c4a276f81c6b8841a1b7223d609f4b2118f47746e493fdd94da25eed","sha512":"d15de5cdee9801d162139cdd3e69a390ba94c1124d81818adbbc4b83cba0821ace279d9edf6f01ca8309eb09e691ed310ff1b4f05a2799434ecb4ba3c3e75443"},"length":125481628,"released
":"2020-03-13T12:30:15.605692554+08:00","url":"/tikv-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"0c81de73061610b74ee6236f6e63a520afe460d285672f0b802404b1b21fad97","sha512":"b71a2861596c66b62334891a7d91b035352a16c1d00b1e65b6a6e7a491a82f8848084cf42aba929dd8d8e720083d00579a78bedfc5ba7de0c30762b6df59f651"},"length":126083546,"released":"2020-03-18T22:52:07.17976717+08:00","url":"/tikv-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"100cb86b723dbc66494720d5cc3b1667861fd5117e3e790f3b0b7eea3206def8","sha512":"a1a616a770974186555a9906520036ecb7e330fd5dea28eecf00d9e07382e3d4bd55f12be8b8ffae685127619b63277944a8271dab99860c252ac540a7681e1f"},"length":133927974,"released":"2020-04-17T01:22:11+08:00","url":"/tikv-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"4ab58d0fb8a9f6eeba8080f8f32de4998387f17b9c005e57b5a38b9644957740","sha512":"b8e7f3f855ae866069090b46998c7b0692ce6e1c1916364b2099ce9747f8c0c71d9a061b7c75e9b934da70c019782aeee9841ce40c890a61396da67b368a58dd"},"length":136951948,"released":"2020-04-29T01:03:39+08:00","url":"/tikv-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"766f7a4ca0d4eaa2a2b5b3bb6a4c118f65cca4784aed1240d0a2a4c367c0698c","sha512":"8962877a2730fa644e3e4a3265069470ee3cf347b6a46bbad61da07575eee4e42f21da52f54699e38230711ba5a21184a2985106562f6f55d7e5f866b1d288f4"},"length":138379849,"released":"2020-05-15T21:54:59+08:00","url":"/tikv-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"8cf8db77cba7080ff57a5edd3d31bd27bf58563be5267580b6893d412ddfa9b4","sha512":"8395a259ae4eac987d188e02974154cfb849b10da19d95d85910c5357927b2c53808eedfb41f262475d032eabbd8a4c75d128f0aba3a3d9c
38d7bb140a7dbafe"},"length":91463382,"released":"2020-04-27T19:38:43+08:00","url":"/tikv-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"e9195f5ebd79396db2cbd679158d8000e8810af7e980a628af3f7311d98616ec","sha512":"2dd003da4969621bd284199c68e4c55dec0f8a6b97c77a49b63ea3cf0c033cafc9052e9567ac63c7befe19470962c115061ce2a6bb914df18c60af23ea127419"},"length":94258441,"released":"2020-05-09T21:11:55+08:00","url":"/tikv-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"f62d144fe3f5fa2f5d8cf9935f40d6d709a8aff41694a18296d6bc8bbd4e3d05","sha512":"f633ace0cfbc78b734d1ec1987c282bdbf6479a4797113a6a19d409f231bf8c3bf106c3af16f4047a1329e9e9f9d23144073e6693916a6a5453760e8f0638e16"},"length":115438319,"released":"2020-04-30T21:02:40+08:00","url":"/tikv-v3.1.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"c3a354f430ff51eb4e1d258f25161dcab04ed99a79fbb5cfc886e5174d890882","sha512":"79fe2776e257ade4a375baba3d66735dfefa980f962902d5dd6f09facf197f5c4da9a5aa5446d44781c820ce42e37f76a2c734eb6319aa10c84de6b82c0d17ef"},"length":135607728,"released":"2020-04-29T01:03:39+08:00","url":"/tikv-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tikv-server","hashes":{"sha256":"a19f2da38bc2566f5a875e589291d7806ff3dc8403e761fdea1f8abc39097905","sha512":"4632f251ee1cba68c77f6582ec83098418f64c067f32f3d386c32968e63da7e3e7790e3f23edbf422192449994fa0d2b95a4ed825d1f3963267aa4500f11f4ee"},"length":137026542,"released":"2020-05-15T21:54:59+08:00","url":"/tikv-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/manifests/root.json000066400000000000000000000161531505422223000232440ustar00rootroot00000000000000{"signatures":[{"keyid":"18007f60ccfdf9fa5bc79b09464880c054ce34289e97ad93696b85cc43aed314","sig":"qBPd
mrhrBW9FPX5urZ973C8nLmwFEOstUBUeVKZIOws81kgpcxQGOQep8iYOKA01O16c/EjI0KwVcjp641t25y1I2AlQ1Wqz2svS/LhTTRs8z8csUWlnxK2gOykjmkaWS3MVUNTlewpBUq/Jhqo5+XLm0Df/VKZwr4wUH0UuJ67QdaY886MXRgRC5JFEYF0VD6z67oa+V8zK3sfjan7FOtppD/anWk4nmd31mnYq3vQsiXgodvMgfo2AGQ1u73Ztunf2jKiInrAJWifBJ9CHvVHU6Kv9mcCOSf4orpPFndw2vmH/hyF/a1RZIg9J/JRzVR03uRp9AAI1+bYSKTuZvw=="},{"keyid":"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a","sig":"SuF1pM8SvDw+t4BeLgSbojJo5Bb+/3hGjQ9RRmnJEAf7BvyjifaU6kz57a+DhabbCyzHJQJYwfA2AzHUYGGRLC2xyXL0SY41J6qqXQoqYTuyqeHGZ2dpGH7lwyzf9EWyY0Z9O/BN0+WGuh7UAtY6uauNAJt6TryKwLpF+AiEZWFcVnDC76uwmEJBJkxAzSL0l28ansmJm6D0k/UbFp0KmcOFN8UJwtx6OrjPd0tPK/3+67fkOqefZPA1OrjfV82t6NEERc/hu4NIJL7JX/PKSYmStLnQmb6lJ+pwlvkDfsYBAQNn7kHmtyc2XAV7NrcccG6Z1c71x+ChUJce6e4CuA=="},{"keyid":"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002","sig":"PrWmZLR7RX5gaUzEg/X93YWvylEVTP/oMhr1mEiVwqU1lf1FPFK+H1QNsYOFpekj1/90KEvBOxl1aDyfDIIGPqLYA1Hmo08Lt5Lqjw9Jv4urXCSZakuQeLFqXGXrXHHBz+tjKQ5W/1XXzRnxjjvkPnsNkj7mxy7j9HgJ4SDiQXCnWtuBOLKi5AOnW3B0zee3kz6tY3b4iKezT47qO2AGa097sqNpdNAG4QIj9bCLZqYGNYl/Xk2EYuRiH4bzuchQLSel9b20kF8dZ8JiWgfrMIkxBA690nD+g8LfRPPxIxCeW1t2ne9YMFnzvjFdSTmrleous7bHxGSKD+Btmdk1Tg=="},{"keyid":"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae","sig":"FKxj84RY10ikxmTzG7VSq2mBCKzpEBgp5Igf9qWla1jeGpPhT51+6Amfxp8M6WbQZBzJG8PiSKetGCXID9Zn/X4Cxg7/TOoH/h4R1KwWQik9rVXPuFyiwsz+aLyrG0ZYU1RTQWmVp/i4eTQZaK7DIH3tmHLWrMj8WuL+dU9RlQW2xRyHV265l3Tpg+eC2E9Ucgxy4JtfoNiGZgnWL7prFFP3Ktu3+wpjfIVfDMPg7u0dqX+ROzv7OAMF2N/ZEW2XmT3tNSuaw77s1JmDudqrTlg4k5foPJyflgLQgAssT0sg50QonYP7O9BlPWRjn+1HRjmrfDGIkZZOUhunDp7jNg=="},{"keyid":"ef644d810a1f1dcce7078ae5b2821cba346a2eac0a371e56feea9e07a5eade37","sig":"QejezQQxHP0VHizh5nMtgxrahCnQgApmwpWs8mIxp1NnnBr5RIVdF9eUykafu6/OXGY8Vy8qEJvfxj8N1jw+rvbC9Ghgcej0Ijj9RQAnyTqQ5PeOzmg4h8ZdkH4J2YufwMWYcI/+qlM6qQusuyahAa5xKkxYCQWSb+Qw0Btd066VwajIUmjI8unUIwtm8miGJbA2GjUTgSnCazw3vavxjy3FR1ce1KpzdigztkWtuABQS8xJ7/ttJPAKokyq2h4RFV9hrvDKZNDfxIBp+1M7f1VqAtha
act3nHfcsNoXQxxrDszhAOlL7b22riloD5Z7lNg0jeaDk2l6NyX5J1XfUQ=="}],"signed":{"_type":"root","expires":"2026-07-26T11:18:30+08:00","roles":{"index":{"keys":{"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn5kVA7MlBfSe7EBaExjl\nKbwoDkn1aYi74s29mFgtRo8nejbrVvZQMCIUhvKc0pFa/l9JD/QY6/nAOCE1lpzi\nwwNkSntfOo3p3HQIR+Ut7hZ4Sxfe/5JagGo3LQ+Hd3EJWUxyEfQ/Bff07F3XAbqM\n5+cKNrdsKWZJcPiJDW621qGwCx52f+gzl9bnFe4/hx34OUgirwqh5DS+LhIO+/yt\nbOiN1AyjQKlnb8lUnblElS4Njd+F4io5VzSrZYi2+4AbTkO6wLwbsWHMzXfv9qwn\nvllufOHpB6EwiQ/xBOMuvJJymHnZvs8AH4SuydQIXLaJuv1ysFaBs0KB/ktbakSK\nLwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/index.json"},"root":{"keys":{"18007f60ccfdf9fa5bc79b09464880c054ce34289e97ad93696b85cc43aed314":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4DYlVfoIQTlyJij0ynjh\njqUkayqXX5c9VXw1Ud3mWCOdThy6V0bmsohgSBeHrfVroSCfsAc5VCUlaSteZeFl\nQEZxpRWDCmSYGslOQZqe2cJi5aqyQOYeU7JLjlfAausLCR9636SfEvQoaCEuGsUI\n67yCVWW2oQ756egUNmOrOSd7Qh6IGuuj9FQb9vExPXTxQw7j95ENOsc1V2lAXCEG\nS1+Nh4NIKdpLOXAohbcpq/HLjddmEAj2GXHo+asITlHCVUQvf574Vh5yLkFWnqj0\nviyRq0jJa9P+qA2oy80a3dk3FBCPu0sov6GfUIC+NtkDfjOkKfluBF9WapqR9wt0\noQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyDwCfCl30vhyJW7fB1bs\npRYKtBKzl7o0qnJTm+IksjQ8RXxj8osUpMLmSvOzCaJ5Wxe+Pm1LpSTDbbubbgvd\nnmEFL6228sifviNIu2HlIl+agfzmXuJ9OBlzGUaI4gAd1Z6pF6+mjlcjz2PbWF84\nAbXZdK49uluqulp7HrGB/qNjGcIRUCHgDU4nnq0OkI1BZZSKm9ovonqDkIK76x/S\niAD9OjKsjQ/s57tE+5WTVObKpfrfK0JeHdpAUsA/2n4L1Z6FmZD4LZWqb0i+C7xj\nMElC99KtjlwRntcjeVWG9YjU8AcEN0n1gON9S2oRdyyAzDTgGb7WueDnn6qstt5w\nSQIDAQAB\n-----END PUBLIC 
KEY-----\n"},"scheme":"rsassa-pss-sha256"},"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOgQkwLOh31QV9OpbO9v\n6o83durJFGPOnVXZiab83pKaSk7HEK9WzXBq0BaPvtFwSfROVdpgtopri5lZi+uH\naMKLUn5F8XRnSMl/7m5vM4XpZZYa4aQId4TWdbFtTu31eHGZ3eEC5nDRJ5NhZOJd\nKLFBu/xmxrh/eNZt4QbdWLZayjHnzyoy5AnfNTR6nJgPAv+rBOqyqT/r14q4Pngh\n3z0I3pNFr5qmxsp013XV+kgOW1F7zT7IMU8xRIgo85UWUNhax0/bjY/2NI1Z+WjR\nyhZmUBMVYWvfw97xDUrvBvrJxZPgg0lGvxJC6LF2dM7wgLaNx9khT6HMBVxjxLMs\nDQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"ef644d810a1f1dcce7078ae5b2821cba346a2eac0a371e56feea9e07a5eade37":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqsL5sV9dhPqnkc3dU2xH\nVRPVuH1bebET64bJya96IXjR3Um/IbIikmIpAL8KbY35h44hR4nNwUQZcQggo854\n5SxDi5LiAkMqdr9uq5mXp7sZXb0HcuHX97BqTUvTvr+t05KaON81ikdVGyRw+Qus\nFFXZO2Pj0w0I4QD87nISAuK0wQJhD8robDzO+Qf2K5cHXjEu5DGNc+wq66pJWCwt\nDl2BAvkF86Y3kZVuEQ6zp5PPQh0l++0PtzY/NNNHiLm7JUSlmpXyis7f+FaCEGl0\n4JWs5ImJg1XjUo2AsSnlFZ3adrPJ4NHFo64ui0/JsEAhn1TBWLL4AhT9kVIBMXI4\n0wIDAQAB\n-----END PUBLIC 
KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":3,"url":"/root.json"},"snapshot":{"keys":{"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqTZx29eJR5EumjqM4YTb\nFlKbim1GNYmtbCLH51BbU2lt46ddmfGvtGsxTD3mIZ/GEHVFv6Aei3xx5nIfhGP0\nrG78JRz394uU8Pd62DiIFWYizr5o+ZBZu29D2YK5ZtxoLFpgt0ibnINK2NcesDC8\nSqfIUbMiQFT6yB/MYD275SjfRGHOeYTPmKdjMJrhLL2cfIPYnQ0QFYIyMvXBG1Fj\nU0rc9UclYQHh9YheIDVYI9YCo/DWP3KFfRJpoTjQRGoPSK9TXcpCAEzQpEG3jOek\n9PdV9Ol6/O8JbrFwXWF3LhkUThg+zCjV4qHtP4oqp5QCqzTQTXGQ9qxWUSlHi4Eu\nIwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/snapshot.json"},"timestamp":{"keys":{"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzTgV5iKhMnunUDxt4PB\npYqTMPaJN/ZdOOsP6cS3DeCE/EcYGfgCjvP7KD3gjG98VDBTVcuwZClSy+/zvHhV\nIq7VWu+yxQL5c6oa1xpCyHoA96JiLIDPhmqEdscdRybcRQ2CYywzKA8jSwEQCnEK\nc8a74ceY352l/MEcOem0+AtKrOjqcjbXCayDwC9yTg/c78bkp+4T8AhSWgt6Tlrt\nY8jLE7zwojFtIYtMwobWRIW2O3nJDXiSBbTPG3M9kF1G43INshSdBcuq5Tmy8lpE\n/XiG/E7+hP63Hm+KAcdvl553Zs7pLhAZxV0kqlApqRRwhscw+JQci8sVONun5t9t\nNwIDAQAB\n-----END PUBLIC 
KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/timestamp.json"}},"spec_version":"0.1.0","version":6}}tiup-1.16.3/pkg/repository/testdata/manifests/snapshot.json000066400000000000000000000031231505422223000241110ustar00rootroot00000000000000{"signatures":[{"keyid":"c0bc219c07f04771557c54fac348c4d3a3122e715442d25021600d39274de064","sig":"uCvyRrHfYb71kuzm1YzEdDBD1C1tDrojlBWtq28F1R0MhXRvektd6OT3kaCukMs5BbDcA9pXk7o6xm1il1FRdQMZklR8VM/h0hSmRB3uypZMw5T849YwbdEIt+9irs2rIcfLJrSA96m4kZA7/ZRS4aRpGBQEZH1hya22SYXUUIk1YJJiFZywHyl/1cjFyEeA+l2sEPTkb7CYc8ZRVQxYoYjM6oKT0wR1Lt/xtHjoKC1yzUPqWTU+wmKd6+iwnPlRFbE7LY+45zSMwu/3HmglSlIyi/G3Y5z7+dYog3BrQcEgKu8/D7XtA9pSiooA3rPP6UT23hqiEY8ZMew0c6+1QQ=="}],"signed":{"_type":"snapshot","expires":"2031-10-21T08:19:55+08:00","meta":{"/alertmanager.json":{"length":1944,"version":1},"/bench.json":{"length":2231,"version":1},"/blackbox_exporter.json":{"length":1997,"version":1},"/cdc.json":{"length":3403,"version":1},"/client.json":{"length":4566,"version":1},"/cluster.json":{"length":22841,"version":1},"/ctl.json":{"length":7252,"version":1},"/doc.json":{"length":2961,"version":1},"/drainer.json":{"length":20477,"version":1},"/grafana.json":{"length":25928,"version":1},"/index.json":{"length":3017,"version":1},"/insight.json":{"length":5525,"version":1},"/mirrors.json":{"length":2266,"version":1},"/node_exporter.json":{"length":1957,"version":1},"/package.json":{"length":3787,"version":1},"/pd.json":{"length":25212,"version":1},"/playground.json":{"length":7819,"version":1},"/prometheus.json":{"length":26305,"version":1},"/pump.json":{"length":20170,"version":1},"/pushgateway.json":{"length":1943,"version":1},"/root.json":{"length":5221,"version":1},"/tidb.json":{"length":25854,"version":1},"/tiflash.json":{"length":3912,"version":1},"/tikv.json":{"length":25463,"version":1}},"spec_version":"0.1.0","version":0}}tiup-1.16.3/pkg/repository/testdata/manifests/timestamp.json000066400000000000000000000014661505422223000242650ustar00root
root00000000000000{"signatures":[{"keyid":"db75ccab60452c4fde6c7a42a70d91b26f24bd56ae315dbd1acdf2356664f1e0","sig":"KEDOJ1G9JY+BfhFGOAXHXhJNwkL0kMqpadSPzIY62XeNteMlq70fJEQQc/esm2FzU2+Iqh7mNOkd5URdFFZsXPtFrrvvTAS33DNMijZYRhPAPu1CaLDs1dQaK/DKdIpJBcKvCg03arpEI+hVpWJUXEhpqOnQyFYsTkhhmZPFqlsY2wL8hqf+CW4SOhy+g5Ex4+2oejGf4UcpuYhA9ySs4EHn0bb752ylJZyY0E2SMZ60tRBGsDjaSKR6GHYF3gglxd+y1kcXlrhglfvLDaxRKdWV12283eTg4ebDHaAXjY+V6VKYcm2IZKlDZHjRdzXZdHSH6QWAkMs0+I/79mzkbg=="}],"signed":{"_type":"timestamp","expires":"2031-10-21T08:19:55+08:00","meta":{"/snapshot.json":{"hashes":{"sha256":"45e467458801ea7533c367877971f2db5b28eb93e963c488ec4bfe5a0ef8be08","sha512":"90463dde24b02282bfabb579e704e204f8c338d6987429d14aabf5ba01ca4ba82a10d6958eb41b0986cc666b6c4b4d1677b12442b74bef6fbbdb934bd30cac5e"},"length":1048576}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/pkg/repository/testdata/polluted/000077500000000000000000000000001505422223000212175ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/testdata/polluted/bin/000077500000000000000000000000001505422223000217675ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/testdata/polluted/bin/root.json000066400000000000000000000161531505422223000236530ustar00rootroot00000000000000{"signatures":[{"keyid":"18007f60ccfdf9fa5bc79b09464880c054ce34289e97ad93696b85cc43aed314","sig":"qBPdmrhrBW9FPX5urZ973C8nLmwFEOstUBUeVKZIOws81kgpcxQGOQep8iYOKA01O16c/EjI0KwVcjp641t25y1I2AlQ1Wqz2svS/LhTTRs8z8csUWlnxK2gOykjmkaWS3MVUNTlewpBUq/Jhqo5+XLm0Df/VKZwr4wUH0UuJ67QdaY886MXRgRC5JFEYF0VD6z67oa+V8zK3sfjan7FOtppD/anWk4nmd31mnYq3vQsiXgodvMgfo2AGQ1u73Ztunf2jKiInrAJWifBJ9CHvVHU6Kv9mcCOSf4orpPFndw2vmH/hyF/a1RZIg9J/JRzVR03uRp9AAI1+bYSKTuZvw=="},{"keyid":"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a","sig":"SuF1pM8SvDw+t4BeLgSbojJo5Bb+/3hGjQ9RRmnJEAf7BvyjifaU6kz57a+DhabbCyzHJQJYwfA2AzHUYGGRLC2xyXL0SY41J6qqXQoqYTuyqeHGZ2dpGH7lwyzf9EWyY0Z9O/BN0+WGuh7UAtY6uauNAJt6TryKwLpF+AiEZWFcVnDC76uwmEJBJkxAzSL0l28ansmJm6D0k/UbFp0KmcOFN8UJwtx6OrjPd0tPK/3+67fk
OqefZPA1OrjfV82t6NEERc/hu4NIJL7JX/PKSYmStLnQmb6lJ+pwlvkDfsYBAQNn7kHmtyc2XAV7NrcccG6Z1c71x+ChUJce6e4CuA=="},{"keyid":"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002","sig":"PrWmZLR7RX5gaUzEg/X93YWvylEVTP/oMhr1mEiVwqU1lf1FPFK+H1QNsYOFpekj1/90KEvBOxl1aDyfDIIGPqLYA1Hmo08Lt5Lqjw9Jv4urXCSZakuQeLFqXGXrXHHBz+tjKQ5W/1XXzRnxjjvkPnsNkj7mxy7j9HgJ4SDiQXCnWtuBOLKi5AOnW3B0zee3kz6tY3b4iKezT47qO2AGa097sqNpdNAG4QIj9bCLZqYGNYl/Xk2EYuRiH4bzuchQLSel9b20kF8dZ8JiWgfrMIkxBA690nD+g8LfRPPxIxCeW1t2ne9YMFnzvjFdSTmrleous7bHxGSKD+Btmdk1Tg=="},{"keyid":"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae","sig":"FKxj84RY10ikxmTzG7VSq2mBCKzpEBgp5Igf9qWla1jeGpPhT51+6Amfxp8M6WbQZBzJG8PiSKetGCXID9Zn/X4Cxg7/TOoH/h4R1KwWQik9rVXPuFyiwsz+aLyrG0ZYU1RTQWmVp/i4eTQZaK7DIH3tmHLWrMj8WuL+dU9RlQW2xRyHV265l3Tpg+eC2E9Ucgxy4JtfoNiGZgnWL7prFFP3Ktu3+wpjfIVfDMPg7u0dqX+ROzv7OAMF2N/ZEW2XmT3tNSuaw77s1JmDudqrTlg4k5foPJyflgLQgAssT0sg50QonYP7O9BlPWRjn+1HRjmrfDGIkZZOUhunDp7jNg=="},{"keyid":"ef644d810a1f1dcce7078ae5b2821cba346a2eac0a371e56feea9e07a5eade37","sig":"QejezQQxHP0VHizh5nMtgxrahCnQgApmwpWs8mIxp1NnnBr5RIVdF9eUykafu6/OXGY8Vy8qEJvfxj8N1jw+rvbC9Ghgcej0Ijj9RQAnyTqQ5PeOzmg4h8ZdkH4J2YufwMWYcI/+qlM6qQusuyahAa5xKkxYCQWSb+Qw0Btd066VwajIUmjI8unUIwtm8miGJbA2GjUTgSnCazw3vavxjy3FR1ce1KpzdigztkWtuABQS8xJ7/ttJPAKokyq2h4RFV9hrvDKZNDfxIBp+1M7f1VqAthaact3nHfcsNoXQxxrDszhAOlL7b22riloD5Z7lNg0jeaDk2l6NyX5J1XfUQ=="}],"signed":{"_type":"root","expires":"2026-07-26T11:18:30+08:00","roles":{"index":{"keys":{"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn5kVA7MlBfSe7EBaExjl\nKbwoDkn1aYi74s29mFgtRo8nejbrVvZQMCIUhvKc0pFa/l9JD/QY6/nAOCE1lpzi\nwwNkSntfOo3p3HQIR+Ut7hZ4Sxfe/5JagGo3LQ+Hd3EJWUxyEfQ/Bff07F3XAbqM\n5+cKNrdsKWZJcPiJDW621qGwCx52f+gzl9bnFe4/hx34OUgirwqh5DS+LhIO+/yt\nbOiN1AyjQKlnb8lUnblElS4Njd+F4io5VzSrZYi2+4AbTkO6wLwbsWHMzXfv9qwn\nvllufOHpB6EwiQ/xBOMuvJJymHnZvs8AH4SuydQIXLaJuv1ysFaBs0KB/ktbakSK\nLwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/index.json"},"root":{"keys":{"18007f60ccfdf9fa5bc79b09464880c054ce34289e97ad93696b85cc43aed314":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4DYlVfoIQTlyJij0ynjh\njqUkayqXX5c9VXw1Ud3mWCOdThy6V0bmsohgSBeHrfVroSCfsAc5VCUlaSteZeFl\nQEZxpRWDCmSYGslOQZqe2cJi5aqyQOYeU7JLjlfAausLCR9636SfEvQoaCEuGsUI\n67yCVWW2oQ756egUNmOrOSd7Qh6IGuuj9FQb9vExPXTxQw7j95ENOsc1V2lAXCEG\nS1+Nh4NIKdpLOXAohbcpq/HLjddmEAj2GXHo+asITlHCVUQvf574Vh5yLkFWnqj0\nviyRq0jJa9P+qA2oy80a3dk3FBCPu0sov6GfUIC+NtkDfjOkKfluBF9WapqR9wt0\noQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyDwCfCl30vhyJW7fB1bs\npRYKtBKzl7o0qnJTm+IksjQ8RXxj8osUpMLmSvOzCaJ5Wxe+Pm1LpSTDbbubbgvd\nnmEFL6228sifviNIu2HlIl+agfzmXuJ9OBlzGUaI4gAd1Z6pF6+mjlcjz2PbWF84\nAbXZdK49uluqulp7HrGB/qNjGcIRUCHgDU4nnq0OkI1BZZSKm9ovonqDkIK76x/S\niAD9OjKsjQ/s57tE+5WTVObKpfrfK0JeHdpAUsA/2n4L1Z6FmZD4LZWqb0i+C7xj\nMElC99KtjlwRntcjeVWG9YjU8AcEN0n1gON9S2oRdyyAzDTgGb7WueDnn6qstt5w\nSQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOgQkwLOh31QV9OpbO9v\n6o83durJFGPOnVXZiab83pKaSk7HEK9WzXBq0BaPvtFwSfROVdpgtopri5lZi+uH\naMKLUn5F8XRnSMl/7m5vM4XpZZYa4aQId4TWdbFtTu31eHGZ3eEC5nDRJ5NhZOJd\nKLFBu/xmxrh/eNZt4QbdWLZayjHnzyoy5AnfNTR6nJgPAv+rBOqyqT/r14q4Pngh\n3z0I3pNFr5qmxsp013XV+kgOW1F7zT7IMU8xRIgo85UWUNhax0/bjY/2NI1Z+WjR\nyhZmUBMVYWvfw97xDUrvBvrJxZPgg0lGvxJC6LF2dM7wgLaNx9khT6HMBVxjxLMs\nDQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"ef644d810a1f1dcce7078ae5b2821cba346a2eac0a371e56feea9e07a5eade37":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqsL5sV9dhPqnkc3dU2xH\nVRPVuH1bebET64bJya96IXjR3Um/IbIikmIpAL8KbY35h44hR4nNwUQZcQggo854\n5SxDi5LiAkMqdr9uq5mXp7sZXb0HcuHX97BqTUvTvr+t05KaON81ikdVGyRw+Qus\nFFXZO2Pj0w0I4QD87nISAuK0wQJhD8robDzO+Qf2K5cHXjEu5DGNc+wq66pJWCwt\nDl2BAvkF86Y3kZVuEQ6zp5PPQh0l++0PtzY/NNNHiLm7JUSlmpXyis7f+FaCEGl0\n4JWs5ImJg1XjUo2AsSnlFZ3adrPJ4NHFo64ui0/JsEAhn1TBWLL4AhT9kVIBMXI4\n0wIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":3,"url":"/root.json"},"snapshot":{"keys":{"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqTZx29eJR5EumjqM4YTb\nFlKbim1GNYmtbCLH51BbU2lt46ddmfGvtGsxTD3mIZ/GEHVFv6Aei3xx5nIfhGP0\nrG78JRz394uU8Pd62DiIFWYizr5o+ZBZu29D2YK5ZtxoLFpgt0ibnINK2NcesDC8\nSqfIUbMiQFT6yB/MYD275SjfRGHOeYTPmKdjMJrhLL2cfIPYnQ0QFYIyMvXBG1Fj\nU0rc9UclYQHh9YheIDVYI9YCo/DWP3KFfRJpoTjQRGoPSK9TXcpCAEzQpEG3jOek\n9PdV9Ol6/O8JbrFwXWF3LhkUThg+zCjV4qHtP4oqp5QCqzTQTXGQ9qxWUSlHi4Eu\nIwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/snapshot.json"},"timestamp":{"keys":{"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzTgV5iKhMnunUDxt4PB\npYqTMPaJN/ZdOOsP6cS3DeCE/EcYGfgCjvP7KD3gjG98VDBTVcuwZClSy+/zvHhV\nIq7VWu+yxQL5c6oa1xpCyHoA96JiLIDPhmqEdscdRybcRQ2CYywzKA8jSwEQCnEK\nc8a74ceY352l/MEcOem0+AtKrOjqcjbXCayDwC9yTg/c78bkp+4T8AhSWgt6Tlrt\nY8jLE7zwojFtIYtMwobWRIW2O3nJDXiSBbTPG3M9kF1G43INshSdBcuq5Tmy8lpE\n/XiG/E7+hP63Hm+KAcdvl553Zs7pLhAZxV0kqlApqRRwhscw+JQci8sVONun5t9t\nNwIDAQAB\n-----END PUBLIC 
KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/timestamp.json"}},"spec_version":"0.1.0","version":6}}tiup-1.16.3/pkg/repository/testdata/polluted/manifests/000077500000000000000000000000001505422223000232105ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/testdata/polluted/manifests/index.json000066400000000000000000000024771505422223000252240ustar00rootroot00000000000000{"signatures":[{"keyid":"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61","sig":"auLOcy4p1L2Z7OFCY6i1gshoB0WbDACSd2OZdmpu+Lx78fevZhn3rIljvP2pGl81wm7S3XcXVN2MMSHe0LVtjJSSGhkM1B1wEAgZdO60WpEF41h12en+pke/YzsWksHtNdyGtlyjexamz6YDbIyBEiif/6JyVXZyfoDteGgw4OKFJqzmacTQ0FkWnYVFH6beSiKFuNOD4QoXOAq6KS2wt+pz/Kl443g+TyBRqwESBELwa7VOdKXX9VtDuqqBUjyZ/7xBxpv+2UdkCgs/Khf+91atJSq3lIDEztJ5WFn7qcERNndZaMuISo8uMXD9xi41q5bcQoWGPcj8ANHfkYXTxQ=="}],"signed":{"_type":"index","components":{"tidb":{"hidden":false,"owner":"pingcap","standalone":false,"url":"/tidb.json","yanked":false}},"default_components":[],"expires":"2021-06-23T12:05:15+08:00","owners":{"pingcap":{"keys":{"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"name":"PingCAP","threshold":1}},"spec_version":"0.1.0","version":420}} 
tiup-1.16.3/pkg/repository/testdata/polluted/manifests/snapshot.json000066400000000000000000000033401505422223000257420ustar00rootroot00000000000000{"signatures":[{"keyid":"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9","sig":"l4wRImk7L956QXuwpq7DH/IkIIHPkTUgr3bdvrbHFZcI1waPOMJlnewFjVe+gyYSCJPqmXDC98c0Blo11938f1sFRO7ZxzIuLsxeqnypMG1/apOkQ7o31ADmsEVkYyaIfFMNVtXJd2QobJhe/ZjDZmxQY16XbGVkuS9y3FnYpPuU1hxJdAjpw49RrxSN0hz8+SQggPxxiPGnP5e4p2Q0F7i8cWUJmpQbzryqIqTRnHGIgqARk2EyKh35JGWlRpSroYgoh0YNIz3O4n+1NreCQ3FljWq4vewR/jtssy3lbn2yl0LJBGa+j1ZC9/Aa9itVSpXJhm5ef1XJQMRfwMVWJA=="}],"signed":{"_type":"snapshot","expires":"2120-08-01T14:47:48+08:00","meta":{"/alertmanager.json":{"length":1511,"version":4},"/bench.json":{"length":6942,"version":13},"/blackbox_exporter.json":{"length":1559,"version":4},"/br.json":{"length":1834,"version":3},"/cdc.json":{"length":7327,"version":47},"/client.json":{"length":4558,"version":1},"/cluster.json":{"length":32845,"version":31},"/ctl.json":{"length":13887,"version":55},"/doc.json":{"length":2952,"version":1},"/drainer.json":{"length":30412,"version":54},"/grafana.json":{"length":37008,"version":59},"/index.json":{"length":3750,"version":420},"/insight.json":{"length":6302,"version":5},"/mirrors.json":{"length":2262,"version":2},"/node_exporter.json":{"length":1523,"version":4},"/package.json":{"length":3778,"version":1},"/pd.json":{"length":34609,"version":61},"/playground.json":{"length":16371,"version":27},"/prometheus.json":{"length":37550,"version":58},"/pump.json":{"length":29950,"version":58},"/pushgateway.json":{"length":1511,"version":4},"/root.json":{"length":7275,"version":2},"/server.json":{"length":1453,"version":2},"/tidb.json":{"length":34947,"version":62},"/tiflash.json":{"length":8300,"version":33},"/tikv.json":{"length":34976,"version":61},"/tiup.json":{"length":9732,"version":29}},"spec_version":"0.1.0","version":0}}tiup-1.16.3/pkg/repository/testdata/polluted/manifests/tidb.json0000664000000000000000000010
42031505422223000250250ustar00rootroot00000000000000{"signatures":[{"keyid":"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae","sig":"EnVXp+ce2kwAIPKumaVP9IlGjRRXDXlvcMlwnfetwDJCWsTYo0h9g37VX0O4MuPt7E13XdvM3Q3LRogNvLvxqXNpC0sT9LOwBhtT4gjgmgNpeyS93z5c4uDUSESgP5otq+2qczRf/l9OrdrHgszr/lb9aOiRbcdSF2PVDp3Yqf1zlUMjCWRc1u5zjLzr+IMI1tRlkQnM7v4XhH06DR3yDRijkWuy65NM7eYpy7E6wiFyB1ftKIJWznjE9WzkMCr5BKiI+8lu554/1v75LUyDPZt+2fGNVzLpfLkGd2CRbcoEmEYpuuoWYv/kojU/dgXo6/ib389+XTH+YErBgFCffA=="}],"signed":{"_type":"component","description":"TiDB is an open source distributed HTAP database compatible with the MySQL protocol","expires":"2121-07-01T20:01:44+08:00","id":"tidb","nightly":"v4.0.0-beta.2-nightly-20200629","platforms":{"darwin/amd64":{"v3.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9ef93384b98a9c695d1be15e58ffec4dc64ccd0ba2a6f3e5b80a44e0ee55de03","sha512":"9b7e2e07548e24e796332344a6137c4d839e9e75a42bfa4881ee4033558b7a9e3315b3f066472b80b12ef08d6ba8433d1509e38111abb29362bbbe2925206223"},"length":26234205,"released":"2020-04-16T16:58:06+08:00","url":"/tidb-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"61c3ec4864ec1c5551053c4247efcdd90e035bba809e10c943fe0acc89aea40f","sha512":"4fc6458d878c8c94658de5ec4afa6cdda1fcdac0a42b85d37989e6919b7e13012bcca59954384890f6086ed18de318fe02a3c6cdcd8a4bc816209260cf5905c7"},"length":26234250,"released":"2020-04-16T14:03:31+08:00","url":"/tidb-v3.0.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"165870b0c61c07c68e7b9e14372b75f2f94be4691eb52a6f9237e9ef2edcad4b","sha512":"e63e2d1075f9715b9e40bb4c2eccbb107e5e8e419b05eba72a74c9e48da3afcbef705816bf595db7e4bccd62810d2ca419a1da563352ca4e32d89016a8c0ed10"},"length":26302814,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"01
eb225d187647bb6bcf5132ea579b6801569ab933bd5650125294d9828f9661","sha512":"9f5dbffea7cfc306848e51511cb418a8e9ea5e21ad8699157407e9403633ce39ee9899618d26f97eda2713b8aa0be231612f77e9e1ee4e87ed934c8a9a3cd9ad"},"length":27351570,"released":"2020-03-13T14:11:53.774527401+08:00","url":"/tidb-v3.0.10-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f1fcbdbadc825ada19b7b15920b06444fd57ef2ce4c3bedfdcd10d3799af9f58","sha512":"fa871ad3e05684990365d9c3ca186e2d6f3426d9fd756539181d61ec4e76551fdaa5ddb1146c183e391e22ac41aaf4c7d5d095c1c3f2711a5575bd7c605a7a5f"},"length":27399702,"released":"2020-04-17T01:09:20+08:00","url":"/tidb-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0ccb355fca4d408c4d75232513ff58632d7bb43278203ce8528fc9e9dd5bbccd","sha512":"7055e820c5e9275c1aa9c7d5622af9f52d1e5be2fad3eab290f08594371016cd0be182232cd537ab3870994c042f78abb1f25541f19202356160cf4977778013"},"length":27399814,"released":"2020-04-17T01:16:04+08:00","url":"/tidb-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7a9d935f01d23d6a9393e13f5ca96a8558e529bb6eeb1641fc6402e06c746670","sha512":"d791b4fceb8224a0ab6ba52b6c042493c0824861888fc69189ff0e9576e63adbb9eca6050812cd0c60bfaee43aea40586f58b5059c9f34be5a9ede1abcc9c303"},"length":27399048,"released":"2020-04-26T17:25:01+08:00","url":"/tidb-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"2551f0d2e13d5475770fb85257266c784efc22dc5ae7eb50bb3820eddfb87c4b","sha512":"7d0567a8ea11c7435cb93ff3de5e5f1f41457e743b4cf640eca96178c933ed905f9c61a7a419786cd9c5c3e6ad4dc59d5aec639e58d0a750a08c5a34e45d3fe3"},"length":27501728,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.15":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bbf2dc352567e14df
8562c687f7356aa9b71ca26c64021af4129445b8b872038","sha512":"2122032666fb9b83faa2d2746a396044aba6ddee9dd01faa32d1d1e9e7cdd3b9d271c8609383c53b478b45a0ddb106c6bc528419c020d24977e0a0170d9b900f"},"length":27535110,"released":"2020-06-05T16:57:59+08:00","url":"/tidb-v3.0.15-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"6e7c4b5c17ed72e258c472a96a8ed6f63b67757551bb052dbceb718bec963a45","sha512":"a55b2522c396bfd7a989ef0f9822ff5f36d83503f43d0d3ac872992075e9df5fc9cdc53f26ca0b1f1b0aa7ddff710dfbfd26dde4399f523711c31f4c66db3cef"},"length":26394385,"released":"2020-04-16T23:55:11+08:00","url":"/tidb-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1058683f1045004dcff6193d9e593a4288d17ac5e4541bf7a438192e371c551a","sha512":"1440fbffcaf7d00d2326cb04dbee2004973e0e3e7fc6ccf27a4b3f99d15f9d6820f1fc8b01ae5b9338ed1f3d0205cc8386ffb13f08eb8df48279d870e1c0ac97"},"length":26459748,"released":"2020-04-17T00:16:31+08:00","url":"/tidb-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f9bb4c7489dcc392a6cfa2b75bb761fcb953f16f76a692d2a7fb207eefb25dc0","sha512":"2fb797be00000ed0be0f472ebf7a8f89af18adfe0655cdecfcbbd74e443b052aa89e0df42f3ad9c14b9cf6e0ace95f85d0c59c93195036ed243b10eb81f5bdbb"},"length":26563695,"released":"2020-04-17T00:22:46+08:00","url":"/tidb-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"4d1423c05d0c79f0b5d5b2ed5fcb1570e44247ea1e6584a207ab621797a30a5d","sha512":"bb10ae842393dfa8341214a55557d5a40df51b00ccb46b78d49f6762fe06747047774e5d8aca372cd691d08e63318951d821502fce6c16874a0fd1a6d938c32f"},"length":32192126,"released":"2020-04-17T00:29:45+08:00","url":"/tidb-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"b0bf190285676dacf4481798a9a62472bdfee3d135e23d5fb79
fcd32c64fa587","sha512":"2a802f452a10eefbe4549af6017c0d12cba86126d59492bd460a4f7fd66c1ef1e373c815f6ec013658af303fdea9b114fef3a4790c3a33f3c96061e243dc52e5"},"length":27124055,"released":"2020-04-17T00:39:33+08:00","url":"/tidb-v3.0.6-darwin-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"6c3081592ea9b3ae9f87aa572bc861e9cdf3ac4cf05b9ddd047bf6c34f91fe1d","sha512":"4e0c467b7b160080d881e213486a995d1d077102d9b408c4780bc3703ebbff5362349354ce55f2c1881e071c0daefa4f15a3a213b3d57c3533be8f5ad4d570a5"},"length":27120035,"released":"2020-04-17T00:46:32+08:00","url":"/tidb-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"983355652ac6d97089f7a551c6cd6296bc7b471a840807a9aee132a406c99ffe","sha512":"1f38756b0098f5241335fd6edf5872470c9d559620115de9a1ac8c1ee657555ba8156642fb819b530e617641d11689f7be061a4ca02474f2ed5dfd5bc44c21f9"},"length":27246015,"released":"2020-04-17T00:54:19+08:00","url":"/tidb-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"70df998aa933316717e5bca4d2973935284b6ce49d1755c46dec6737676d570b","sha512":"1ac95ea3b902f5708591edd659f710aef0a4d22583986ba3fb41181d7739d39de1f9149d20f9288390917b3dd651c5fb33ad82ea79532dc392c5222130a1c232"},"length":27332410,"released":"2020-04-17T01:00:58+08:00","url":"/tidb-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"4b0c4d69c4e2699ccb5ccae89a81c08817cca106d37644ce261b4f7983e356f7","sha512":"86845acf140cffe808f24124e298db1467dfcd98df6abaf1607b4043ca4c37bf1eaa220e928bff76f9381b8d1929fa1a824aa47e35b482b80414024203ca5b13"},"length":28156482,"released":"2020-05-22T15:34:33+08:00","url":"/tidb-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7154423e6bdf5fc2c689c7fc725bd7c3b9530a5dfb158e54f5da702129910f7e","sha512":"368e7
79252547dc6405bbea771cfb58b91c5bd14586ebc14967774b615cdf67297fc5b010b55d0aa77b4e111eafc76842e98f01dd7ab403564834926641731c4"},"length":27202791,"released":"2020-05-22T14:35:59+08:00","url":"/tidb-v3.1.0-beta-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"addf1322c81a79d1a316ae9779e7f143135e703e006f8e1ae7f9d0a32d66cb5e","sha512":"a6b274419dbcb0f9796cee257f29d000786e457466ee819b925012992c2921cbd66570f1705b69cc77f14d03660bc750c66b04472fbdfbfeb45ad4a93cd01f8c"},"length":27422699,"released":"2020-05-22T15:22:30+08:00","url":"/tidb-v3.1.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d5a15fbebd4379fa823c869dc4117d19ff889561bb03b19110c5e84bd0fb26c9","sha512":"e47b4787bfb66e02e9233013d302e72266318e09281140e8dde75f7c085215afa8299fdfa0dc2929360e99f25976da6a9a2ef6e7513e9648234ef9768a28ec45"},"length":28062180,"released":"2020-05-22T15:28:20+08:00","url":"/tidb-v3.1.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5d3d621009148f99d4ceefaa1f75e832d1191eea711e22a5370b360450b5d4a3","sha512":"5d04dedd689f9efdbc6ce776e9161b23ec6d671076cc44e09880b9244fa02c16c0b2489dabc1060c9a577374e72bfae7e786bb53bfaad198ca196ac6227a8c9d"},"length":28184931,"released":"2020-05-22T15:56:23+08:00","url":"/tidb-v3.1.0-rc-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d9d5739daa91392bae0ce17455a22c86cd1672645373e1eacee6fe46f732003e","sha512":"fdb81f2c2b491b669830dd358b219520f84ffd07e7a90c2fd1448573f8f540fdb700b302ddeffcae3553f4276345e4dec88b2106fc0f25aada30f4a8d79b86ea"},"length":28369009,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v3.1.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"82b6307eb661d843488e6ab5caee638538526c935022d6ed3e3b581c21ad752a","sha512":"9
7f86e7ded5c2f26e64a4607c7da761165f5dc235cd28ffa3b45b7474868b0cd51b20494aef2b5c4bed9e59e2d32916504989c4a8ee5160829efb99c854d7bfe"},"length":28368564,"released":"2020-06-04T17:57:09+08:00","url":"/tidb-v3.1.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1aeec438560be6e92f2783ee657827299c1ab4c3f9ae02a3cbc893bf963d3955"},"length":40773797,"released":"2020-05-28T20:10:10+08:00","url":"/tidb-v4.0.0-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e0d904aeee1c0ae215dc363d28d043f21b6d8e52352608e31ab7044b924968ef","sha512":"e1be572e43d53920720b918ee532302165eb9994a2cf46371da7bfe590587988c41d3944add3278e041f7c5947c1fd17aece4a14fe4ead7a3c027ddb6afc9716"},"length":32614395,"released":"2020-05-26T11:18:05+08:00","url":"/tidb-v4.0.0-beta-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"6b9ea806250fe52c7c6db880eee10f83e8e9d0b7c55d281b401eb93527eff1ed","sha512":"b93b373b5eb9b437badab007011fafbbb37c80626be33a46392067352455e8cfeba9faeaaa11d9266502e6bd1553463a3b5cc7d053c427eb7528197f7fcfa3ef"},"length":33489884,"released":"2020-05-26T11:42:48+08:00","url":"/tidb-v4.0.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"cd6e91ff48be1c7ff849aa2c3cad2092923695a3c52ef782f602bdf355697d70","sha512":"14fff5efbef85a7635354eeb915346da67cbc0b4f4fbc94e77cff0236ce37a0dd201a0a557976d8b887243d54a4b6dfee219d0afe4912adbf9ff083747ad231a"},"length":34318099,"released":"2020-05-26T11:56:51+08:00","url":"/tidb-v4.0.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2-nightly-20200629":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1ed0fbc0d2de4a0e7eaf1b45175906934ec2d4768f3bc37ae15854967102a636","sha512":"3aa5faa70bd8009de142523e38048d9896bfe830b599f3f477cfecb56818e07623c8efb79441d5ddb450db781bed333d5b7d49d3dbec508aa6
5dbb0d0ccac104"},"length":44152618,"released":"2020-06-29T05:00:12+08:00","url":"/tidb-v4.0.0-beta.2-nightly-20200629-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f225e7b60f2b7f4a5264825ca4487089fcc98f0508a150e7928eb2e2b981b5df","sha512":"4827cb724c59cdb06327ab20d9f67ee6e99df67596f3fb28101b05dade6f87ade25e5c384a1ba44cc2ca69f55e72b879e014c6219de11d665213d0d3daee1090"},"length":34458489,"released":"2020-05-26T14:56:06+08:00","url":"/tidb-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e88ac6fff667c0fbebb8a4a5805d9f174ed57ab172f44a417efef69ebca2b310","sha512":"f535fd63037c1dddec93c9e6c4bca0ed27243babdc06fa80943480abe68843e2af04f9193129345d9102e0604f7438adc12a6b4852b04e5861490a23b9cf8d6b"},"length":34826086,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9c3d54eaf90e700e83fbe5354d980deb14fa4d080b93422b0b399efe8732df1b","sha512":"841f78400e06d77e855c56286081611e5861f8fbd9dee4d8408bd4ac7e4baf45c2c26b04053ab3230c8004ee7d2c08aa263c2df642711a2ba0298ff3900146d3"},"length":40461780,"released":"2020-05-15T21:54:51+08:00","url":"/tidb-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false},"v4.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5a024307caaef69694d8ddfc7ab7323222b7a66728365b2679ef151c59f250f8","sha512":"bcb06f628faba5682026f76409ed490d7caf1825769ed2c478ab29830d795aa26e08011db358a1574afac49b7f7a9d3230e5f60bcf2fe9a1d60dcfba4c1a2d16"},"length":40773791,"released":"2020-06-12T21:22:29+08:00","url":"/tidb-v4.0.1-darwin-amd64.tar.gz","yanked":false},"v4.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"503580650f2efa8f48eb2373deac121e5c634a0368562ef7117dc46c86fb0828","sha512":"edccee096d4825bd4065e828af1aa4cc9f83f0823c580d3304870b21900fe3637607028f1233895f73e869beac2ca00f54a
17d9c13be60a53f8a93071c614f3f"},"length":41043044,"released":"2020-07-01T20:01:44+08:00","url":"/tidb-v4.0.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c08a9b1211af6f6a2e6b3393e2c9bacb2e5485739da0942022fd9e3b14949fd3","sha512":"948878fcfd7fb616cc4dc84545187492984cbf31d53c41b9c761d37bdb01068efd5779aa89919bfab938d97fcfdeb0f7e405051993c67278636daa9bcf3f4521"},"length":26787208,"released":"2020-04-16T16:58:06+08:00","url":"/tidb-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"91758e462c216bd273c4afabcfeabc06c037d555a6e23f1a06ab58b3b9cd28c3","sha512":"871c714bd1daba19c6a3729649b3c5b9f2eb3d739f7decea759d7efa85a15f9c2565e1e9de20deb286823ce53ad37cb7941be717fa171917c02d847a655deff1"},"length":26191751,"released":"2020-04-16T14:03:31+08:00","url":"/tidb-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"acce252326320f6d54c1ca737a358c5b5c5e1b54a965f30133c4ef8c6df10109","sha512":"2e455a849f5f185397bdf0865397a6b7c169ad0ec285a4360002235e9a777452603c36e89b92ab0d59ad523193b2cc38252ee42ccbd18c5fbe577a452fbb4deb"},"length":25957605,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"346ddb4078fadbed6bd59dac98c04f2d4b7134a54da0018c9a16deef7e9016be","sha512":"f0561c7a985c00dfdcc0e0f2743f8a91be206cf1bc9c89929409f2e8b1bf109862fc7274cd0734e76364fd02193296689ad71b7a47b59a3e5d16534b04249fef"},"length":27008746,"released":"2020-03-13T14:11:53.774527401+08:00","url":"/tidb-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9232886ac827634fec1def3f5575f3cd4d35675a1b18f07d3a35f84b83f3848e","sha512":"3cddae74d3ea50fd90b056bd97f635db789b4d4094e516aed282efc20fcbe4371f4e575c2d8b533ca5a0fe870cdd8b9cd7dad25c24b222220
081adb504c5b92e"},"length":27019356,"released":"2020-04-17T01:09:20+08:00","url":"/tidb-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"70221ca570c31bc6e703169d49bf5e3f1c241a889d3adce47ec7d1f865db5a62","sha512":"8873134f0927e043ea66799e6bed8fa7dd477c9ec2b195e4a89e53957a6d3d8646a191ac286e307fa525dd34f62f7051be433db12108c874ae09c851cffed41f"},"length":27056612,"released":"2020-04-17T01:16:04+08:00","url":"/tidb-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"367de505050520a530f2c5e8bd7b4e1b58305e6a4408c3617c9658248956de78","sha512":"0311fe99bea76e6e92cf06354b95a2adc3ecd83f9cd72db8d1eeebc5ba837e52339fbd1f3caad4efd9b24903615478355529b5434995b0ae7c4c13b4eb783140"},"length":27057866,"released":"2020-04-26T17:25:01+08:00","url":"/tidb-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9e4a8ed5aa19e0eee6124ee9f86c400687d641454b461df9419fc5005b83fae1","sha512":"08c0ce3b960211d03b4608c6f335fe58d16246cf58db5a14112722ec86a5f9fbb8d4c7178faf2e3c910400cbcfe1890707a10c825b95320ee3605739d8aae873"},"length":27153764,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.15":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"36bed0b0e8e5f762bf6c43eaf657700c64cdf08e76fc27c9c672f39df022fc41","sha512":"8ed57ce9c1b385968cf7475f61ae0a1a55abb57b6a6f0fd2e22a04812c6cb94e0efac3646ac37a7bb32602f87b703c9660aa5b62cba97a78c01e3b1b7e51cc1d"},"length":27194062,"released":"2020-06-05T16:50:59+08:00","url":"/tidb-v3.0.15-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"41c0be9510fb590d688527145fdd3252069234e60059030d1a1695839780d045","sha512":"6517339c3f333e0e14963e66e7675232cd2f6a64b808af5c76279342a39314d5c588ec62dcd71c7cec0edfc726d475fc5c34e30ec45d0c5ff85eec749542a85a"},"length":2635
6240,"released":"2020-04-16T23:55:11+08:00","url":"/tidb-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bfcf9fafd39f41767fc90367beae6e7624427c485ffe3dc4e90f44c0e264fad1","sha512":"35b7b61796c504b44541180f98dbc7471cda859790cc6c11f86b9594e390dc060b2e2182f01c4cc55d05968b92d8c43b2793ffb9c1aaac4ca3f3f05c35c8ffab"},"length":26411379,"released":"2020-04-17T00:16:31+08:00","url":"/tidb-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f5c9a39f083e7384fd1a1b04cec5510c37237d58b9cc99c036a094639b4e08bc","sha512":"3052398c4a12a6ee933d5ae717e2294b08be6d2bb5a136e8336e082018764860bfba82fb0020549a886f2c4ad42da7ef8785a0a98815414be852ad28c799fcfd"},"length":26761632,"released":"2020-04-17T00:22:46+08:00","url":"/tidb-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d847f9214241d7676da39d0704c9e5731843d8a596f2ce9f1d9fab8e9823e7f4","sha512":"4fd4ea0cd3404eea2abe6a4267a109c6b4e58939bb4f8efbf9196f068f0393f19c45250bc92bf3dae5dc354bb080956f381cb1a4eed33909bee9aab77cedc80f"},"length":26260883,"released":"2020-04-17T00:29:45+08:00","url":"/tidb-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"2dd998f99a964f417d2dccc88fd31c8afda59c81e0a4deb0897bdca2aadea998","sha512":"718db462b4f1120f90e25eed3b8db3b31b75e9860c891b7d602c70f86b02159112a4fef24aafde4aacfb0f3e64977c0f969e62d087980b22ae694ae5b6989849"},"length":26778819,"released":"2020-04-17T00:39:33+08:00","url":"/tidb-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"4c710645f76255bd591dcb465cb17cc61d9905929617ebec44eb98d84cf0d45f","sha512":"1344e43c4e30770bfa399a95d9ffd50fd45cc80ef5fabd89069af47e014804eec6ffc18e1c262d7a24c3a5ad9d06c0b28bf0cee63f46d638fb7d66d6b415bad1"},"length":26787172,"released":"2020-04-17T00:46:32+08:
00","url":"/tidb-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c7002d37bd1b1492445e9d1c7dd3a642f43888750c591f5bb026d7f9c983cc6e","sha512":"b6c9c3c3a5e6e8ac30d6e43376b1f1500de1d06d305a3c33d87676490a6a738c8256c2eedd3f8ce4abf3c193896e27f3700c10a6f2f7082b6d1c3c495b861d33"},"length":26903200,"released":"2020-04-17T00:54:19+08:00","url":"/tidb-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d76da75d2122a3aef6805a4c936b24f1e6af1466163501706aab0ae900d260f9","sha512":"79f0f4d0d91a54cbd0bf2489dd683541f61ab606893a423b9c0911a40a630f20a789e9df5ce73ef2c2cf2554926e63ba5589ae439c6b90619cc29006f2d4edc6"},"length":26997279,"released":"2020-04-17T01:00:58+08:00","url":"/tidb-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bf7d58ee1565daa4d24370e32adeca87edded2f497ab277c137fa3e0652b4101","sha512":"f75dbf6ad349ae96bd157fdc6c0932627c230eeeacffe0091e123f3453136423cdc41968717c0263c562b0491dcfe0b5e9989fe3e35e0cc751964f250e27d95c"},"length":27796291,"released":"2020-05-22T15:34:33+08:00","url":"/tidb-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c4f7414be060716ba0b841e0fb0075faa1fa15d99db246a5500a527fe338e7bc","sha512":"895f1edaf11cca877b6ba6ce35f19b48cdf5527fabe697a8c11290d33d464204642527eb26581853c8b89fa69dd1a3dfabf940bb60a7d9ae99a55e9c45c19257"},"length":26868228,"released":"2020-05-22T14:35:59+08:00","url":"/tidb-v3.1.0-beta-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"47fd44d4c68195bec2bae4de912b2d6b3dc58cd74faa83f166d2939dcd2398d4","sha512":"7eb26d17f38e919b8970b687c1266c5cf3dec9db8ac7910597826fc45406d6aeec506bd7690309acc927c9b3c39d50400e92155a527464be2638428828047843"},"length":27077345,"released":"2020-05-22T15:22:30+08:00","url":"/tidb-v3.1.0
-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"cf2079cd886c3878db85a6484083d926f73562dcb93444e89efc9e444f534b8c","sha512":"97baba009d34171b882d46e16af1ad1524f56c2ef10cc3cc9fbeabe874367bad10f312701e751348ddc9bbc07a7957010fe932baab7cc558eb855c171ded9ec4"},"length":27717224,"released":"2020-05-22T15:28:20+08:00","url":"/tidb-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"88dca8e755713b9e5f21deab0eb0c7687f668ff48c1839ffa1d346b854721840","sha512":"5030bc57db3c0a002b9f955170f8a4f3233a334c27761b8c60c2b12280d128323ef6fbf5566ec851c7eeebc03c3c4d0dd0d2d8a750f3cd191a6a9227d5297416"},"length":27840449,"released":"2020-05-22T15:56:23+08:00","url":"/tidb-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bd7b1e1cbafac6b4875fef0360083dff74ee65cbcd72d2e4da7ea84c47f83f6c","sha512":"fbc8ce3048f7b09ecda269b891fb0acff320682711e59d11886674f22194cc5b9b2259ba4deac7d1beccf61a7cf29e3fb322b749eb41aa260f2970fd13852496"},"length":28015426,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-linux-amd64.tar.gz","yanked":false},"v3.1.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"981128adf9ccaf62c13fb677b9467265ff36c76adbafb3fd6f774e61d6879827","sha512":"c3818ac13639b3e7da8f232fa9c773dc27ad742d5fc27aeb27b5ac4a1b80c5439df0baa2a2b7bb77f6f4b7c8cd16b59c04d4b906b2c5a650106370021c6b7bfd"},"length":28016399,"released":"2020-06-04T17:53:39+08:00","url":"/tidb-v3.1.2-linux-amd64.tar.gz","yanked":false},"v4.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"48edaf41220e07199c2f842ebbf52a2eb543b6c8beebb1f21270694e4dbc3fa1"},"length":40481686,"released":"2020-05-28T16:23:23+08:00","url":"/tidb-v4.0.0-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7e6ebdebb1af2135119fc1cb34f1253b88f563f
8b9e299e8d69a714bf5809043","sha512":"11a719f45b2dc4572aa1d158679d15a1ad1f02188790f016f78e7b62fb0038fa30bfec2ccf8a75b26ab11bbf572598652866b568052f24b6180b1f7f0ca8697b"},"length":32294889,"released":"2020-05-26T11:18:05+08:00","url":"/tidb-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"a17b34e13d27f9a0bb6f4c3736162af1fbf6d423b9d6ddc402e53bd88e3feb0b","sha512":"5f3debdbf73ef3e0de0dbef2f79d2cdb2516df6fc03103239a4e649759ebfc583fd7f326f8793598c617b004de80087bb90553a237b40b028809c927c1b787c8"},"length":33118956,"released":"2020-05-26T11:42:48+08:00","url":"/tidb-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c454f5aa16f94b36b834d1bd7be9fef0767738c9ccc1273b834b6bab04f073b8","sha512":"e936487500fd1b646a0a8f46ba0afc5032dd983c45789af71f918d269329b7d7fcaa9859a17c43eddd0676f1bf6100fbb3977d1ba3e54105725c54219b2fc162"},"length":33940011,"released":"2020-05-26T11:56:51+08:00","url":"/tidb-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2-nightly-20200629":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"93bde8b1259abadf19240b570f4f5523f5f781b12421c6a669856d4376a5d513","sha512":"4a50c14754b1b29fc423d81434dde00304110b036c4a2940250e94e05836ea87f10a5b890d443b27b6a6f999a115f4a8db86ca300b26a6129e9b64bb94c5c0ec"},"length":43922053,"released":"2020-06-29T04:57:29+08:00","url":"/tidb-v4.0.0-beta.2-nightly-20200629-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"46b0db34ccc02d34663667c58032f158cd093acdc79da5c080eff30221ed0072","sha512":"4f65c55109b3dcce316196599974d51da06c3da71948171937233187dfcc2f27a2f52c2900dbc84507ec0efdd2efa7293463a402117ceaa4e3623b44c0ce25e7"},"length":34051221,"released":"2020-05-26T14:56:06+08:00","url":"/tidb-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes
":{"sha256":"2d5fab2bcc8793d85447ad2769eac8fca58d25de685039296e30cee0d0798137","sha512":"71ebc639cb23bd9671696ed43c974be6ab74781cf6543e5f3ff35ae69fac882557b6b49f8df8c7ee7cb5abeaef28b71f8edbb02d921a1aa2af6d58a33fc36b6d"},"length":34431514,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0bcd84a48071c4059519e64e743f40e6435670db0c4d9050f11b48136543a6ff","sha512":"f36c63ca590b624a0aea518f5141624b13c6d36b20f5ab0c0c973723cd481fee51753aa1b538a7fc1a81a3fb409830ed98576f0eb4d13b1bbcf31ce5eed67596"},"length":40116238,"released":"2020-05-15T21:54:51+08:00","url":"/tidb-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false},"v4.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"b80edea55cdd3bfaa79b5e0d25ae23e96decd574913801c8f5265b9b7af81d11","sha512":"ae83c1b94139c873e56940e2f872f54c4b6013c3c0d521a9176389bb52f860ea6a5cf6566a30b2d45273a2b339f25747fb30eb5ce7549fc5624acab722da75db"},"length":40473132,"released":"2020-06-15T12:00:45+08:00","url":"/tidb-v4.0.1-linux-amd64.tar.gz","yanked":false},"v4.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"32178adcfebabd3a49099c905f4389fdcc446e69c64a19bb7431782f83d7b956","sha512":"b1d561978d34f879660083081bfcf29cc546967cdcbbb85e7d8bd45f49323f128bc53d2fda907ac4735669ea406acd088d97dcd528de55e95b4954fb3746e2e9"},"length":40735884,"released":"2020-07-01T19:57:14+08:00","url":"/tidb-v4.0.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"a90257c97d3c2ad4591899bee76328d2ca7db5654f55a39f5258bde1ddd025bc","sha512":"443dd0b778d2ac455687db8a891b88523061e86d8d7ea5c96f15bfacd8022a83565bac6af9e4f1cd99843f209bb08aee57b3e16cfe1b0321fe46286ccc620109"},"length":24791080,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha2
56":"0d303763b988d9262f5c7a750212b61786c56234823bde988922c1a205cdef13","sha512":"1db89fc07db4258aea659d832eb642561f59cb71de3eee14a52422889bf6c0ee03c99b27d7ffad20251b3267a680cc487f814f887e5a28e63ee69cc9f3ac9e6c"},"length":25904695,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.0.15":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"900c48cb13bf9c20d52051ff2e935d7c01c706673a517b0292f9ca0dd36cef05","sha512":"96651d9e8bb5267900b3e415a3dc53deb0c6515ca62e508c8d93a4b202402cb571a6050355e6f6eb52dd5649a76b0d73b6c4f0e536bed7cc1624503b57ade3c6"},"length":25955262,"released":"2020-06-05T16:53:32+08:00","url":"/tidb-v3.0.15-linux-arm64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e48df49c5b12ec301a2e5c8f526f82d02ddacf86084068aa7c7d7bf00f1d43ef","sha512":"0eeeff903bc436734c4d7d5fbea965bca29befa84043e4e800d85335582a11ee5162a6850236a8e67fb0cd71d551b8d72f8ec3014c3cec3b6c0043ecfbc906bd"},"length":26492125,"released":"2020-05-22T15:34:33+08:00","url":"/tidb-v3.1.0-linux-arm64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"02e3256c53eef4dbbdd3575f10d5697dcea1ce9e4e139383d777bcc83869260d","sha512":"705d4d1a02b7657200539d41f7c0a45aef9ed09e784991de366d232aa3b5c0774408e16a2e6b91588c2256ede3786b565141493b3076ceee4d9fc99b058564ba"},"length":25629984,"released":"2020-05-22T14:35:59+08:00","url":"/tidb-v3.1.0-beta-linux-arm64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e5484ded8d48e13ad25b600e5fa3f66c634de3bfc1624208143ee90ace3578f7","sha512":"8b566e03037a173d5c468e488e9faeae2a00d02bc7a4f464ffa30b45e81201c622843c001ae52139786530a13799ab9dcec94edbdb3a3bc7a4bc4e02ad63f446"},"length":25852605,"released":"2020-05-22T15:22:30+08:00","url":"/tidb-v3.1.0-beta.1-linux-arm64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1
5498fbad839b1939ceb5a9339b74f349b77e4d26fc5f57815f33958f15d344e","sha512":"8a941309a0edb7e070bce86ca988d7174a262be377720157dac7d5df131caea34116fdbe376bdee196bd8ff8813fe39d796b849cedf4f7c6be91cdfc9a0ff636"},"length":26427881,"released":"2020-05-22T15:28:20+08:00","url":"/tidb-v3.1.0-beta.2-linux-arm64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"150f9e56a4d20f3abc2f0995e5ff1e9b71ba40162db0af42912e11238ec1bcc5","sha512":"82b22c14f18b41277d214b0c5def64fa96dcd376c3135debdfc69267e9821737daa679a8993afedbbd46552b02542d804b176a662854c04f6613fce95356be5d"},"length":26548117,"released":"2020-05-22T15:56:23+08:00","url":"/tidb-v3.1.0-rc-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"35209970a3014f3d655c220acf6259b2fe16c28f5efdfb61150681070e736a5e","sha512":"5149ee0260dd877fdd13821e243ddc8fc156475ca4a3fe0c04b066836c38170cff3f2c1611fbffde920e6d9833718a8bd174258073e1762ec7a1238c1c7ea77d"},"length":26595507,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-linux-arm64.tar.gz","yanked":false},"v3.1.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bcdb3cb0e2fd75337a019b4a94d9f324d37cd069f0123a0074bae54595e04312","sha512":"5bfe1a5fde54a9e3328a7e5389b2469cec6326621c9d9964189535870714c004983d16e92e50b30d47e3e6a31784fec7311c4dc54773943d9ec07516a9b5d985"},"length":26594970,"released":"2020-06-04T17:55:40+08:00","url":"/tidb-v3.1.2-linux-arm64.tar.gz","yanked":false},"v4.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"ad1bf23b8eec2ab65e3e41bc1970ecaa0ca4d60979f45e48a74e83ddb6606426"},"length":38318862,"released":"2020-05-28T20:10:11+08:00","url":"/tidb-v4.0.0-linux-arm64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f2eceda6a197735070308c5900c06b2c44f5fcb919a245b52a706b64219614f6","sha512":"2b4ea9e2034a4b6948ef61e36907ba6ba1706b5b6204b67b033b5fdd9b19edc3a61f27c5714d22d7b08524a
b07b259f23d4d98bbd48ad3acda31dd647d315919"},"length":30628110,"released":"2020-05-26T11:18:05+08:00","url":"/tidb-v4.0.0-beta-linux-arm64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5f091a07d8f1570c2998a69cb58470ec157a461024ca0691dc77c84b6c6be8b6","sha512":"6c0e3b4f7d7d44c9749eab89f366b604cec7c0fdb2ffb4e899f57af993e424ab695493b4b7e07ab7fa2d55996dab6d84878396f4780068208d1ece978414353a"},"length":31396287,"released":"2020-05-26T11:42:48+08:00","url":"/tidb-v4.0.0-beta.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0db43d917e73e4151aa5a009c8370a5e18c350e38d324ece844367d2db097358","sha512":"4cac617b54b6245f85c323a686ae7fe3125a2233c2946f08a3620cb66dbf3738c9916215ef255cd9c583e57d81df74388a789547a5d568f74785019d59f6f41c"},"length":32162424,"released":"2020-05-26T11:56:51+08:00","url":"/tidb-v4.0.0-beta.2-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"964ff7932ac73c77389e0230bd883a9ea124954e08ed186ae3da683e644fb15f","sha512":"979dff98cda72a6d2ab2b0973afce48f597dbfe4ad6f547ff5dfefff5657bdd1d7786c91c290e1a4a7ad1148061d53ecde195256c4d16a6f9e3ece9ad1ceab32"},"length":32274928,"released":"2020-05-26T14:56:06+08:00","url":"/tidb-v4.0.0-rc-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1964979c80f03e20dda9c6e3b5abbc77fe8345c8a8b4bb2c15aaef92a72f2136","sha512":"218cb429a237e3d6d9e2dd7bc43b605a0dad5bae5a2a1cc0af3e8f7336b85ea54502a5729e8034c540c9c6b3eca97dc49c40124c106ea0c15335d2cdb2bc6fc9"},"length":32626370,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"63297933955cdfb166073722b4d2c1f95191f36e25dae977223889c3123f71c3","sha512":"56e1b8d267e658d4b58c6695976df01836386070cf5acc23cbc566f0e0102143381b85569
f744c735eeddba9d7272243849e8e90d484d644347fa1b0a86bef91"},"length":38034242,"released":"2020-05-15T21:54:51+08:00","url":"/tidb-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false},"v4.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"340ee4a4871b8e560ec4c8cdfaf2e4b0c8e90eb8cc72fc5e90129c27286562e5","sha512":"ad2d18321be9d4a6cd71853954bb356a12318e7b4cd357f978324bf9434e4ecd82244c80bc3b005855990e18104ae2fee1d87b5aefcab175a5b70f822ba1d404"},"length":38328065,"released":"2020-06-12T21:21:12+08:00","url":"/tidb-v4.0.1-linux-arm64.tar.gz","yanked":false},"v4.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1d46c0579b9aa7f8328c6d7c03f7a042e2aaf01e1864880eacb5a96a02fbe226","sha512":"d344877e3d62b4b9caa56281915eb548dd531e4bc84e57fb3132ece26c6839bf2bb0ff5b7483e640b6c3223ffb5a1ea1b1684fd5e78d51886457296f96027d25"},"length":38566695,"released":"2020-07-01T19:59:39+08:00","url":"/tidb-v4.0.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":62}}tiup-1.16.3/pkg/repository/testdata/polluted/manifests/timestamp.json000066400000000000000000000012511505422223000261050ustar00rootroot00000000000000{"signatures":[{"keyid":"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd","sig":"V8MgDDCmfVb8N0O3unbAno8q6i2Ag1Sbr/3n12Odk8McKzZaif7OcDm1IZB5J3o7ajsBF1tduTrcO7OijJQvx8l9i6aZi9J1lb/eJpYsyvQWdzd/T7osdRkEIhtM4/sGFjGslOolTFmpA/U5IkJ+FWAi38YaFPRn8bfIPLGniRAYs4/qjLBB3RgBUlDIIVvTiJIHEHtf3Bqb5LjpEjW4XhmDK94LJbKUqfO/6oDnQzI6Rot7zBWwDQVrIHakvQxoqA5c2jtMHCXSdX9cN7aRrNO4csggMzvQot7K0JYYszlroXnsL2ioNMgcPhtoEaMLW9mFjmdgR0j1//n1mxtdWA=="}],"signed":{"_type":"timestamp","expires":"2000-08-01T14:47:48+08:00","meta":{"/snapshot.json":{"hashes":{"sha256":"24c9fa83f15eda0683999b98ac0ff87fb95aed91c10410891fb38313f38e35c1"},"length":1760}},"spec_version":"0.1.0","version":639}}tiup-1.16.3/pkg/repository/utils/000077500000000000000000000000001505422223000167165ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/utils/hash.go000066400000000000
000000000022211505422223000201650ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "crypto/sha256" "crypto/sha512" "encoding/hex" "io" "os" "github.com/pingcap/tiup/pkg/repository/v1manifest" ) // HashFile returns the sha256/sha512 hashes and the file length of specific file func HashFile(filepath string) (map[string]string, int64, error) { s256 := sha256.New() s512 := sha512.New() file, err := os.Open(filepath) if err != nil { return nil, 0, err } defer file.Close() n, err := io.Copy(io.MultiWriter(s256, s512), file) hashes := map[string]string{ v1manifest.SHA256: hex.EncodeToString(s256.Sum(nil)), v1manifest.SHA512: hex.EncodeToString(s512.Sum(nil)), } return hashes, n, err } tiup-1.16.3/pkg/repository/v1_repository.go000066400000000000000000000673221505422223000207440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package repository

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/fatih/color"
	cjson "github.com/gibson042/canonicaljson-go"
	"github.com/pingcap/errors"
	"github.com/pingcap/tiup/pkg/localdata"
	logprinter "github.com/pingcap/tiup/pkg/logger/printer"
	"github.com/pingcap/tiup/pkg/repository/v1manifest"
	"github.com/pingcap/tiup/pkg/utils"
	"golang.org/x/mod/semver"
	"golang.org/x/sync/errgroup"
)

// ErrUnknownComponent represents the specific component cannot be found in index.json
var ErrUnknownComponent = errors.New("unknown component")

// ErrUnknownVersion represents the specific component version cannot be found in component.json
var ErrUnknownVersion = errors.New("unknown version")

// V1Repository represents a remote repository viewed with the v1 manifest design.
type V1Repository struct {
	Options
	// mirror is the remote source manifests and components are fetched from.
	mirror Mirror
	// local is the on-disk store of verified manifests and installed components.
	local v1manifest.LocalManifests
	// timestamp caches the last fetched timestamp manifest so repeated
	// operations in one process do not re-download it (see PurgeTimestamp).
	timestamp *v1manifest.Manifest
}

// ComponentSpec describes a component a user would like to have or use.
type ComponentSpec struct {
	// TargetDir it the target directory of the component,
	// Will use the default directory of Profile if it's empty.
	TargetDir string
	// ID is the id of the component
	ID string
	// Version describes the versions which are desirable; "" = use the most recent, compatible version.
	Version string
	// Force is true means overwrite any existing installation.
	Force bool
}

// NewV1Repo creates a new v1 repository from the given mirror.
// The local manifest store must already contain a trusted root.
func NewV1Repo(mirror Mirror, opts Options, local v1manifest.LocalManifests) *V1Repository { if opts.GOOS == "" { opts.GOOS = runtime.GOOS } if opts.GOARCH == "" { opts.GOARCH = runtime.GOARCH } repo := &V1Repository{ Options: opts, mirror: mirror, local: local, } return repo } const maxTimeStampSize uint = 1024 const maxRootSize uint = 1024 * 1024 // WithOptions clone a new V1Repository with given options func (r *V1Repository) WithOptions(opts Options) Repository { return NewV1Repo(r.Mirror(), opts, r.local) } // Mirror returns Mirror func (r *V1Repository) Mirror() Mirror { return r.mirror } // LocalLoadManifest returns the local cached manifests func (r *V1Repository) LocalLoadManifest(index *v1manifest.Index) (*v1manifest.Manifest, bool, error) { return r.local.LoadManifest(index) } // LocalLoadComponentManifest returns the local cached manifests for component func (r *V1Repository) LocalLoadComponentManifest(component *v1manifest.ComponentItem, filename string) (*v1manifest.Component, error) { return r.local.LoadComponentManifest(component, filename) } // LocalComponentInstalled checks if the current component is already installed. func (r *V1Repository) LocalComponentInstalled(component, version string) (bool, error) { return r.local.ComponentInstalled(component, version) } // UpdateComponents updates the components described by specs. 
func (r *V1Repository) UpdateComponents(specs []ComponentSpec) error {
	// Make sure root/snapshot/index manifests are fresh before resolving any spec.
	err := r.ensureManifests()
	if err != nil {
		return err
	}
	// Honor the env switch that keeps the downloaded archive after decompression.
	keepSource := false
	if v := os.Getenv(localdata.EnvNameKeepSourceTarget); v == "enable" || v == "true" {
		keepSource = true
	}
	// Errors for individual specs are collected so one failure does not stop
	// the remaining installs; they are joined and returned at the end.
	var errs []string
	for _, spec := range specs {
		manifest, err := r.updateComponentManifest(spec.ID, false)
		if err != nil {
			if errors.Cause(err) == ErrUnknownComponent {
				fmt.Println(color.YellowString("The component `%s` not found (may be deleted from repository); skipped", spec.ID))
			} else {
				errs = append(errs, err.Error())
			}
			continue
		}
		// Resolve the "nightly" alias to the concrete nightly version, if any.
		if spec.Version == utils.NightlyVersionAlias {
			if !manifest.HasNightly(r.PlatformString()) {
				fmt.Printf("The component `%s` on platform %s does not have a nightly version; skipped\n", spec.ID, r.PlatformString())
				continue
			}
			spec.Version = manifest.Nightly
		}
		// Empty version means "latest stable".
		if spec.Version == "" {
			ver, _, err := r.LatestStableVersion(spec.ID, false)
			if err != nil {
				errs = append(errs, err.Error())
				continue
			}
			spec.Version = ver.String()
		}
		if !spec.Force {
			installed, err := r.local.ComponentInstalled(spec.ID, spec.Version)
			if err != nil {
				return err
			}
			if installed {
				fmt.Printf("component %s version %s is already installed\n", spec.ID, spec.Version)
				continue
			}
		}

		targetDir := filepath.Join(r.local.TargetRootDir(), localdata.ComponentParentDir, spec.ID, spec.Version)
		if spec.TargetDir != "" {
			targetDir = spec.TargetDir
		}

		versionItem, err := r.ComponentVersion(spec.ID, spec.Version, false)
		if err != nil {
			return err
		}

		target := filepath.Join(targetDir, versionItem.URL)
		err = r.DownloadComponent(versionItem, target)
		if err != nil {
			// Remove the partially-populated directory so a failed install
			// does not look like a successful one.
			os.RemoveAll(targetDir)
			errs = append(errs, err.Error())
			continue
		}
		reader, err := os.Open(target)
		if err != nil {
			os.RemoveAll(targetDir)
			errs = append(errs, err.Error())
			continue
		}
		err = r.local.InstallComponent(reader, targetDir, spec.ID, spec.Version, versionItem.URL, r.DisableDecompress)
		reader.Close()
		if err != nil {
			os.RemoveAll(targetDir)
			errs = append(errs, err.Error())
		}

		// remove the source gzip target if expand is on && no keep source
		if !r.DisableDecompress && !keepSource {
			_ = os.Remove(target)
		}
	}

	if len(errs) > 0 {
		return errors.New(strings.Join(errs, "\n"))
	}
	return nil
}

// ensureManifests ensures that the snapshot, root, and index manifests are up to date and saved in r.local.
func (r *V1Repository) ensureManifests() error {
	defer func(start time.Time) {
		logprinter.Verbose("Ensure manifests finished in %s", time.Since(start))
	}(time.Now())

	// Update snapshot (this transitively refreshes timestamp and root as needed).
	snapshot, err := r.updateLocalSnapshot()
	if err != nil {
		return err
	}

	return r.updateLocalIndex(snapshot)
}

// Postcondition: if returned error is nil, then the local snapshot and timestamp are up to date and return the snapshot
func (r *V1Repository) updateLocalSnapshot() (*v1manifest.Snapshot, error) {
	defer func(start time.Time) {
		logprinter.Verbose("Update local snapshot finished in %s", time.Since(start))
	}(time.Now())

	timestampChanged, tsManifest, err := r.fetchTimestamp()
	if v1manifest.IsSignatureError(errors.Cause(err)) || v1manifest.IsExpirationError(errors.Cause(err)) {
		// The signature is wrong, update our signatures from the root manifest and try again.
		err = r.updateLocalRoot()
		if err != nil {
			return nil, err
		}
		timestampChanged, tsManifest, err = r.fetchTimestamp()
		if err != nil {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}

	var snapshot v1manifest.Snapshot
	snapshotManifest, snapshotExists, err := r.local.LoadManifest(&snapshot)
	if err != nil {
		return nil, err
	}

	// Compare the snapshot hash recorded in the timestamp against the hash of
	// our locally cached snapshot to decide whether a re-fetch is needed.
	hash := tsManifest.Signed.(*v1manifest.Timestamp).SnapshotHash()
	bytes, err := cjson.Marshal(snapshotManifest)
	if err != nil {
		return nil, err
	}
	hash256 := sha256.Sum256(bytes)

	snapshotChanged := true
	// TODO: check changed in fetchTimestamp by compared to the raw local snapshot instead of timestamp.
	if snapshotExists && hash.Hashes[v1manifest.SHA256] == hex.EncodeToString(hash256[:]) {
		// Nothing has changed in snapshot.json
		snapshotChanged = false
	}

	if snapshotChanged {
		manifest, err := r.fetchManifestWithHash(v1manifest.ManifestURLSnapshot, &snapshot, &hash)
		if err != nil {
			return nil, err
		}

		// Persistent the snapshot first and prevent the snapshot.json/timestamp.json inconsistent
		// 1. timestamp.json is fetched every time
		// 2. when interrupted after timestamp.json been saved but snapshot.json have not, the snapshot.json is not going to be updated anymore
		err = r.local.SaveManifest(manifest, v1manifest.ManifestFilenameSnapshot)
		if err != nil {
			return nil, err
		}

		// Update root if needed and restart the update process
		var oldRoot v1manifest.Root
		_, _, err = r.local.LoadManifest(&oldRoot)
		if err != nil {
			return nil, err
		}
		newRootVersion := snapshot.Meta[v1manifest.ManifestURLRoot].Version
		if newRootVersion > oldRoot.Version {
			err := r.updateLocalRoot()
			if err != nil {
				return nil, err
			}
			// Root keys changed; re-run the whole snapshot update under the new root.
			return r.updateLocalSnapshot()
		}
	}

	if timestampChanged {
		err = r.local.SaveManifest(tsManifest, v1manifest.ManifestFilenameTimestamp)
		if err != nil {
			return nil, err
		}
	}

	return &snapshot, nil
}

// FnameWithVersion returns a filename, which contains the specific version number
// e.g. FnameWithVersion("dir/root.json", 2) == "dir/2.root.json".
func FnameWithVersion(fname string, version uint) string {
	base := filepath.Base(fname)
	dir := filepath.Dir(fname)

	versionBase := strconv.Itoa(int(version)) + "." + base
	return filepath.Join(dir, versionBase)
}

// updateLocalRoot fetches successive versions of the root manifest, one at a
// time, verifying each against the keys of the previous one, until the mirror
// reports no newer version; the final version is then saved as the trusted root.
func (r *V1Repository) updateLocalRoot() error {
	defer func(start time.Time) {
		logprinter.Verbose("Update local root finished in %s", time.Since(start))
	}(time.Now())

	oldRoot, err := r.loadRoot()
	if err != nil {
		return err
	}
	startVersion := oldRoot.Version
	// Work on a copy of the key store; r.local's store is only updated when the
	// final root is saved below.
	keyStore := *r.local.KeyStore()

	var newManifest *v1manifest.Manifest
	for {
		var newRoot v1manifest.Root
		url := FnameWithVersion(v1manifest.ManifestURLRoot, oldRoot.Version+1)
		nextManifest, err := r.fetchManifestWithKeyStore(url, &newRoot, maxRootSize, &keyStore)
		if err != nil {
			// Break if we have read the latest version.
			if errors.Cause(err) == ErrNotFound {
				break
			}
			return err
		}
		newManifest = nextManifest

		// Versions must advance exactly one at a time (anti-rollback).
		if newRoot.Version != oldRoot.Version+1 {
			return errors.Errorf("root version is %d, but should be: %d", newRoot.Version, oldRoot.Version+1)
		}

		if err = v1manifest.ExpiresAfter(&newRoot, oldRoot); err != nil {
			return err
		}

		// This is a valid new version.
		err = r.local.SaveManifest(newManifest, v1manifest.RootManifestFilename(newRoot.Version))
		if err != nil {
			return err
		}
		oldRoot = &newRoot
	}

	// We didn't change anything.
	if startVersion == oldRoot.Version {
		return nil
	}

	// Check expire of this version.
	err = v1manifest.CheckExpiry(v1manifest.ManifestFilenameRoot, oldRoot.Expires)
	if err != nil {
		return err
	}

	// Save the new trusted root without a version number. This action will also update the key store in r.local from
	// the new root.
	err = r.local.SaveManifest(newManifest, v1manifest.ManifestFilenameRoot)
	if err != nil {
		return err
	}

	return nil
}

// Precondition: the root manifest has been updated if necessary.
func (r *V1Repository) updateLocalIndex(snapshot *v1manifest.Snapshot) error {
	defer func(start time.Time) {
		logprinter.Verbose("Update local index finished in %s", time.Since(start))
	}(time.Now())

	// Update index (if needed).
	var oldIndex v1manifest.Index
	_, exists, err := r.local.LoadManifest(&oldIndex)
	if err != nil {
		return err
	}

	snapIndexVersion := snapshot.Meta[v1manifest.ManifestURLIndex].Version

	// Local index already matches the snapshot: nothing to do.
	if exists && oldIndex.Version == snapIndexVersion {
		return nil
	}

	root, err := r.loadRoot()
	if err != nil {
		return err
	}

	url, fileVersion, err := snapshot.VersionedURL(root.Roles[v1manifest.ManifestTypeIndex].URL)
	if err != nil {
		return err
	}

	var index v1manifest.Index
	manifest, err := r.fetchManifest(url, &index, fileVersion.Length)
	if err != nil {
		return err
	}

	// Reject version rollback of the index manifest.
	if exists && index.Version < oldIndex.Version {
		return errors.Errorf("index manifest has a version number < the old manifest (%v, %v)", index.Version, oldIndex.Version)
	}

	return r.local.SaveManifest(manifest, v1manifest.ManifestFilenameIndex)
}

// Precondition: the snapshot and index manifests exist and are up to date.
func (r *V1Repository) updateComponentManifest(id string, withYanked bool) (*v1manifest.Component, error) {
	defer func(start time.Time) {
		logprinter.Verbose("update component '%s' manifest finished in %s", id, time.Since(start))
	}(time.Now())

	// Find the component's entry in the index and snapshot manifests.
	var index v1manifest.Index
	_, _, err := r.local.LoadManifest(&index)
	if err != nil {
		return nil, err
	}

	var components map[string]v1manifest.ComponentItem
	if withYanked {
		components = index.ComponentListWithYanked()
	} else {
		components = index.ComponentList()
	}

	item, ok := components[id]
	if !ok {
		return nil, ErrUnknownComponent
	}
	var snapshot v1manifest.Snapshot
	_, _, err = r.local.LoadManifest(&snapshot)
	if err != nil {
		return nil, err
	}

	filename := v1manifest.ComponentManifestFilename(id)
	url, fileVersion, err := snapshot.VersionedURL(item.URL)
	if err != nil {
		return nil, err
	}

	oldVersion := r.local.ManifestVersion(filename)

	if oldVersion != 0 && oldVersion == fileVersion.Version {
		// We're up to date, load the old manifest from disk.
		comp, err := r.local.LoadComponentManifest(&item, filename)
		if comp == nil && err == nil {
			err = fmt.Errorf("component %s does not exist", id)
		}
		return comp, err
	}

	var component v1manifest.Component
	manifest, fetchErr := r.fetchComponentManifest(&item, url, &component, fileVersion.Length)
	if fetchErr != nil {
		// ignore manifest expiration error here and continue building component object,
		// the manifest expiration error should be handled by caller, so try to return it
		// with a valid component object.
		if !v1manifest.IsExpirationError(errors.Cause(fetchErr)) {
			return nil, fetchErr
		}
	}

	// Reject version rollback of the component manifest.
	if oldVersion != 0 && component.Version < oldVersion {
		return nil, fmt.Errorf("component manifest for %s has a version number < the old manifest (%v, %v)", id, component.Version, oldVersion)
	}

	err = r.local.SaveComponentManifest(manifest, filename)
	if err != nil {
		return nil, err
	}

	// Note: fetchErr may be a (tolerated) expiration error — see above.
	return &component, fetchErr
}

// DownloadComponent downloads the component specified by item into local file,
// the component will be removed if hash is not correct
func (r *V1Repository) DownloadComponent(item *v1manifest.VersionItem, target string) error {
	// make a tempdir so that concurrent downloads do not interfere with each other
	targetDir := filepath.Dir(target)
	err := os.MkdirAll(targetDir, 0755)
	if err != nil {
		return errors.Trace(err)
	}
	targetDir, err = os.MkdirTemp(targetDir, "download")
	if err != nil {
		return errors.Trace(err)
	}

	if err := r.mirror.Download(item.URL, targetDir); err != nil {
		return err
	}

	// the downloaded file is named by item.URL, which may differ from the target name
	if downloaded := path.Join(targetDir, item.URL); downloaded != target {
		err := os.Rename(downloaded, target)
		if err != nil {
			return err
		}
	}

	reader, err := os.Open(target)
	if err != nil {
		return err
	}
	// Verify the sha256 recorded in the signed manifest before trusting the file.
	_, err = checkHash(reader, item.Hashes[v1manifest.SHA256])
	reader.Close()
	if err != nil {
		// remove the target component so a tampered file is not left on disk
		_ = os.Remove(target)
		return errors.Errorf("validation failed for %s: %s", target, err)
	}

	return nil
}

// PurgeTimestamp remove timestamp cache from repository
func (r *V1Repository) PurgeTimestamp() {
	r.timestamp = nil
}

// FetchTimestamp downloads the timestamp file, validates it, and checks if the snapshot hash in it
// has the same value of our local one. (not hashing the snapshot file itself)
// Return whether the manifest is changed compared to the one in local ts and the FileHash of snapshot.
func (r *V1Repository) fetchTimestamp() (changed bool, manifest *v1manifest.Manifest, err error) {
	// check cache first
	if r.timestamp != nil {
		return false, r.timestamp, nil
	}

	// On success the deferred assignment populates the cache; on error the
	// named return `manifest` is nil, so nothing bad is cached.
	defer func(start time.Time) {
		logprinter.Verbose("Fetch timestamp finished in %s", time.Since(start))
		r.timestamp = manifest
	}(time.Now())

	var ts v1manifest.Timestamp
	manifest, err = r.fetchManifest(v1manifest.ManifestURLTimestamp, &ts, maxTimeStampSize)
	if err != nil {
		return false, nil, err
	}

	hash := ts.SnapshotHash()

	var localTs v1manifest.Timestamp
	_, exists, err := r.local.LoadManifest(&localTs)
	if err != nil {
		return false, nil, err
	}

	switch {
	case !exists:
		changed = true
	case ts.Version < localTs.Version:
		// Version rollback of the timestamp manifest is rejected.
		return false, nil, fmt.Errorf("timestamp manifest has a version number < the old manifest (%v, %v)", ts.Version, localTs.Version)
	case hash.Hashes[v1manifest.SHA256] != localTs.SnapshotHash().Hashes[v1manifest.SHA256]:
		changed = true
	}

	return changed, manifest, nil
}

// PlatformString returns a string identifying the current system.
func PlatformString(os, arch string) string {
	return fmt.Sprintf("%s/%s", os, arch)
}

// PlatformString returns a string identifying the current system.
// PlatformString returns a string identifying the current system.
func (r *V1Repository) PlatformString() string {
	return PlatformString(r.GOOS, r.GOARCH)
}

// fetchComponentManifest downloads a component manifest and validates it,
// parsing it into com; item supplies the component entry used during
// verification via ReadComponentManifest.
func (r *V1Repository) fetchComponentManifest(item *v1manifest.ComponentItem, url string, com *v1manifest.Component, maxSize uint) (*v1manifest.Manifest, error) {
	return r.fetchBase(url, maxSize, func(reader io.Reader) (*v1manifest.Manifest, error) {
		return v1manifest.ReadComponentManifest(reader, com, item, r.local.KeyStore())
	})
}

// fetchManifest downloads and validates a manifest from this repo.
func (r *V1Repository) fetchManifest(url string, role v1manifest.ValidManifest, maxSize uint) (*v1manifest.Manifest, error) {
	return r.fetchBase(url, maxSize, func(reader io.Reader) (*v1manifest.Manifest, error) {
		return v1manifest.ReadManifest(reader, role, r.local.KeyStore())
	})
}

// fetchManifestWithKeyStore is like fetchManifest but verifies signatures
// against the caller-supplied key store instead of the local one.
func (r *V1Repository) fetchManifestWithKeyStore(url string, role v1manifest.ValidManifest, maxSize uint, keys *v1manifest.KeyStore) (*v1manifest.Manifest, error) {
	return r.fetchBase(url, maxSize, func(reader io.Reader) (*v1manifest.Manifest, error) {
		return v1manifest.ReadManifest(reader, role, keys)
	})
}

// fetchManifestWithHash is like fetchManifest but additionally checks the raw
// payload against an expected SHA256 (and length, via hash.Length) before
// parsing, so a corrupted body is rejected before signature verification.
func (r *V1Repository) fetchManifestWithHash(url string, role v1manifest.ValidManifest, hash *v1manifest.FileHash) (*v1manifest.Manifest, error) {
	return r.fetchBase(url, hash.Length, func(reader io.Reader) (*v1manifest.Manifest, error) {
		bufReader, err := checkHash(reader, hash.Hashes[v1manifest.SHA256])
		if err != nil {
			return nil, errors.Annotatef(err, "validation failed for %s", url)
		}

		return v1manifest.ReadManifest(bufReader, role, r.local.KeyStore())
	})
}

// fetchBase fetches url from the mirror (reading at most maxSize bytes) and
// hands the stream to f for parsing/validation, annotating any error with the
// mirror source for diagnostics.
func (r *V1Repository) fetchBase(url string, maxSize uint, f func(reader io.Reader) (*v1manifest.Manifest, error)) (*v1manifest.Manifest, error) {
	reader, err := r.mirror.Fetch(url, int64(maxSize))
	if err != nil {
		return nil, errors.Annotatef(err, "fetch %s from mirror(%s) failed", url, r.mirror.Source())
	}
	defer reader.Close()

	m, err := f(reader)
	if err != nil {
		return m, errors.Annotatef(err, "read manifest from mirror(%s) failed", r.mirror.Source())
	}
	return m, nil
}

// checkHash buffers reader fully in memory, verifies the content's SHA256
// against the expected hex digest, and on success returns a fresh reader
// rewound to the start so the caller can re-read the validated bytes.
func checkHash(reader io.Reader, sha256 string) (io.Reader, error) {
	buffer := new(bytes.Buffer)
	_, err := io.Copy(buffer, reader)
	if err != nil {
		return nil, err
	}

	b := buffer.Bytes()
	bufReader := bytes.NewReader(b)
	if err = utils.CheckSHA256(bufReader, sha256); err != nil {
		return nil, err
	}

	// rewind so the returned reader yields the whole payload again
	_, err = bufReader.Seek(0, io.SeekStart)
	if err != nil {
		return nil, err
	}

	return bufReader, nil
}

// loadRoot loads the trusted root manifest from local storage; it does not
// touch the mirror and fails if no local root exists.
func (r *V1Repository) loadRoot() (*v1manifest.Root, error) {
	root := new(v1manifest.Root)
	_, exists, err := r.local.LoadManifest(root)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.New("no trusted root in the local manifests")
	}
	return root, nil
}

// FetchRootManifest fetch the root manifest: it first refreshes all local
// manifests from the mirror via ensureManifests, then loads root locally.
func (r *V1Repository) FetchRootManifest() (root *v1manifest.Root, err error) {
	err = r.ensureManifests()
	if err != nil {
		return nil, err
	}

	root = new(v1manifest.Root)
	_, exists, err := r.local.LoadManifest(root)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.Errorf("no root manifest")
	}
	return root, nil
}
func (r *V1Repository) FetchIndexManifest() (index *v1manifest.Index, err error) { err = r.ensureManifests() if err != nil { return nil, err } index = new(v1manifest.Index) _, exists, err := r.local.LoadManifest(index) if err != nil { return nil, err } if !exists { return nil, errors.Errorf("no index manifest") } return index, nil } // DownloadTiUP downloads the tiup tarball and expands it into targetDir func (r *V1Repository) DownloadTiUP(targetDir string) error { var spec = ComponentSpec{ TargetDir: targetDir, ID: TiUPBinaryName, Version: "", Force: true, } return r.UpdateComponents([]ComponentSpec{spec}) } // UpdateComponentManifests updates all components's manifest to the latest version func (r *V1Repository) UpdateComponentManifests() error { index, err := r.FetchIndexManifest() if err != nil { return err } var g errgroup.Group for name := range index.Components { g.Go(func() error { _, err := r.updateComponentManifest(name, false) if err != nil && errors.Cause(err) == ErrUnknownComponent { err = nil } return err }) } err = g.Wait() return err } // GetComponentManifest fetch the component manifest. func (r *V1Repository) GetComponentManifest(id string, withYanked bool) (com *v1manifest.Component, err error) { err = r.ensureManifests() if err != nil { return nil, err } return r.updateComponentManifest(id, withYanked) } // LocalComponentManifest load the component manifest from local. 
func (r *V1Repository) LocalComponentManifest(id string, withYanked bool) (com *v1manifest.Component, err error) { index := v1manifest.Index{} _, exists, err := r.local.LoadManifest(&index) if err == nil && exists { components := index.ComponentList() comp := components[id] filename := v1manifest.ComponentManifestFilename(id) componentManifest, err := r.local.LoadComponentManifest(&comp, filename) if err == nil && componentManifest != nil { return componentManifest, nil } } return r.GetComponentManifest(id, withYanked) } // ComponentVersion returns version item of a component func (r *V1Repository) ComponentVersion(id, ver string, includeYanked bool) (*v1manifest.VersionItem, error) { manifest, err := r.GetComponentManifest(id, includeYanked) if err != nil { return nil, err } if ver == utils.NightlyVersionAlias { if !manifest.HasNightly(r.PlatformString()) { return nil, errors.Annotatef(ErrUnknownVersion, "component %s does not have nightly on %s", id, r.PlatformString()) } ver = manifest.Nightly } if ver == "" { v, _, err := r.LatestStableVersion(id, includeYanked) if err != nil { return nil, err } ver = v.String() } vi := manifest.VersionItem(r.PlatformString(), ver, includeYanked) if vi == nil { return nil, errors.Annotatef(ErrUnknownVersion, "version %s on %s for component %s not found", ver, r.PlatformString(), id) } return vi, nil } // LocalComponentVersion returns version item of a component from local manifest file func (r *V1Repository) LocalComponentVersion(id, ver string, includeYanked bool) (*v1manifest.VersionItem, error) { manifest, err := r.LocalComponentManifest(id, includeYanked) if err != nil { return nil, err } if ver == utils.NightlyVersionAlias { if !manifest.HasNightly(r.PlatformString()) { return nil, errors.Annotatef(ErrUnknownVersion, "component %s does not have nightly on %s", id, r.PlatformString()) } ver = manifest.Nightly } if ver == "" { v, _, err := r.LatestStableVersion(id, includeYanked) if err != nil { return nil, err } ver = 
v.String() } vi := manifest.VersionItem(r.PlatformString(), ver, includeYanked) if vi == nil { return nil, errors.Annotatef(ErrUnknownVersion, "version %s on %s for component %s not found", ver, r.PlatformString(), id) } return vi, nil } func findVersionFromManifest(id, constraint, platform string, manifest *v1manifest.Component) (string, error) { cons, err := utils.NewConstraint(constraint) if err != nil { return "", err } versions := manifest.VersionList(platform) verList := make([]string, 0, len(versions)) for v := range versions { verList = append(verList, v) } sort.Slice(verList, func(p, q int) bool { return semver.Compare(verList[p], verList[q]) > 0 }) for _, v := range verList { if cons.Check(v) { return v, nil } } return "", errors.Annotatef(ErrUnknownVersion, "version %s on %s for component %s not found", constraint, platform, id) } // ResolveComponentVersionWithPlatform resolves the latest version of a component that satisfies the constraint func (r *V1Repository) ResolveComponentVersionWithPlatform(id, constraint, platform string) (utils.Version, error) { manifest, err := r.LocalComponentManifest(id, false) if err != nil { return "", err } var ver string switch constraint { case "", utils.LatestVersionAlias: v, _, err := r.LatestStableVersion(id, false) if err != nil { return "", err } ver = v.String() case utils.NightlyVersionAlias: v, _, err := r.LatestNightlyVersion(id) if err != nil { return "", err } ver = v.String() default: ver, err = findVersionFromManifest(id, constraint, platform, manifest) if err != nil { manifest, err = r.GetComponentManifest(id, false) if err != nil { return "", err } ver, err = findVersionFromManifest(id, constraint, platform, manifest) if err != nil { return "", errors.Annotatef(ErrUnknownVersion, "version %s on %s for component %s not found", constraint, platform, id) } } } return utils.Version(ver), nil } // ResolveComponentVersion resolves the latest version of a component that satisfies the constraint func (r 
// ResolveComponentVersion resolves the latest version of a component that
// satisfies the constraint, for the current platform.
func (r *V1Repository) ResolveComponentVersion(id, constraint string) (utils.Version, error) {
	return r.ResolveComponentVersionWithPlatform(id, constraint, r.PlatformString())
}

// LatestNightlyVersion returns the latest nightly version of specific component
// along with its version item, or an error if the component publishes no
// nightly for the current platform.
func (r *V1Repository) LatestNightlyVersion(id string) (utils.Version, *v1manifest.VersionItem, error) {
	com, err := r.GetComponentManifest(id, false)
	if err != nil {
		return "", nil, err
	}
	if !com.HasNightly(r.PlatformString()) {
		return "", nil, fmt.Errorf("component %s doesn't have nightly version on platform %s", id, r.PlatformString())
	}

	return utils.Version(com.Nightly), com.VersionItem(r.PlatformString(), com.Nightly, false), nil
}

// LatestStableVersion returns the latest stable version of specific component.
// Nightly versions are always skipped. If no non-prerelease version exists,
// the highest prerelease version is returned as a fallback.
func (r *V1Repository) LatestStableVersion(id string, withYanked bool) (utils.Version, *v1manifest.VersionItem, error) {
	com, err := r.GetComponentManifest(id, withYanked)
	if err != nil {
		return "", nil, err
	}

	var versions map[string]v1manifest.VersionItem
	if withYanked {
		versions = com.VersionListWithYanked(r.PlatformString())
	} else {
		versions = com.VersionList(r.PlatformString())
	}
	if versions == nil {
		return "", nil, fmt.Errorf("component %s doesn't support platform %s", id, r.PlatformString())
	}

	// last tracks the highest version of any kind (except nightly);
	// lastStable tracks the highest version without a prerelease suffix.
	var last string
	var lastStable string
	for v := range versions {
		if utils.Version(v).IsNightly() {
			continue
		}

		if last == "" || semver.Compare(last, v) < 0 {
			last = v
		}

		if semver.Prerelease(v) == "" && (lastStable == "" || semver.Compare(lastStable, v) < 0) {
			lastStable = v
		}
	}

	if lastStable == "" {
		if last == "" {
			return "", nil, fmt.Errorf("component %s doesn't has a stable version", id)
		}
		// only prereleases exist: fall back to the highest one
		return utils.Version(last), com.VersionItem(r.PlatformString(), last, false), nil
	}

	return utils.Version(lastStable), com.VersionItem(r.PlatformString(), lastStable, false), nil
}

// BinaryPath return the binary path of the component.
// Assumes the component is already installed; the entry is resolved from the
// local manifest only (LocalComponentVersion), so no mirror access is forced.
// Yanked versions are included because a version may have been installed
// before it was yanked.
func (r *V1Repository) BinaryPath(installPath string, componentID string, ver string) (string, error) {
	// We need yanked version because we may have installed that version before it was yanked
	versionItem, err := r.LocalComponentVersion(componentID, ver, true)
	if err != nil {
		return "", err
	}

	entry := versionItem.Entry
	if entry == "" {
		return "", errors.Errorf("cannot found entry for %s:%s", componentID, ver)
	}

	return filepath.Join(installPath, entry), nil
}
package repository import ( "crypto/sha256" "encoding/hex" "fmt" "io" "os" "path" "path/filepath" "strings" "testing" cjson "github.com/gibson042/canonicaljson-go" "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/crypto" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" "github.com/stretchr/testify/assert" ) // Create a profile directory // Contained stuff: // - index.json: with wrong signature // - snapshot.json: correct // - tidb.json: correct // - timestamp: with expired timestamp func genPollutedProfileDir() (string, error) { uid := uuid.New().String() dir := path.Join("/tmp", uid) wd, err := os.Getwd() if err != nil { return "", err } if err := utils.Copy(path.Join(wd, "testdata", "polluted"), dir); err != nil { return "", err } return dir, nil } func TestPollutedManifest(t *testing.T) { profileDir, err := genPollutedProfileDir() assert.Nil(t, err) defer os.RemoveAll(profileDir) profile := localdata.NewProfile(profileDir, &localdata.TiUPConfig{}) local, err := v1manifest.NewManifests(profile) assert.Nil(t, err) // Mock remote expired mirror := MockMirror{ Resources: map[string]string{ "/timestamp.json": `{"signatures":[{"keyid":"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd","sig":"V8MgDDCmfVb8N0O3unbAno8q6i2Ag1Sbr/3n12Odk8McKzZaif7OcDm1IZB5J3o7ajsBF1tduTrcO7OijJQvx8l9i6aZi9J1lb/eJpYsyvQWdzd/T7osdRkEIhtM4/sGFjGslOolTFmpA/U5IkJ+FWAi38YaFPRn8bfIPLGniRAYs4/qjLBB3RgBUlDIIVvTiJIHEHtf3Bqb5LjpEjW4XhmDK94LJbKUqfO/6oDnQzI6Rot7zBWwDQVrIHakvQxoqA5c2jtMHCXSdX9cN7aRrNO4csggMzvQot7K0JYYszlroXnsL2ioNMgcPhtoEaMLW9mFjmdgR0j1//n1mxtdWA=="}],"signed":{"_type":"timestamp","expires":"2000-08-01T14:47:48+08:00","meta":{"/snapshot.json":{"hashes":{"sha256":"24c9fa83f15eda0683999b98ac0ff87fb95aed91c10410891fb38313f38e35c1"},"length":1760}},"spec_version":"0.1.0","version":639}}`, }, } repo := NewV1Repo(&mirror, Options{}, local) err = 
repo.UpdateComponentManifests() assert.NotNil(t, err) assert.Contains(t, err.Error(), "has expired at") // Mock remote correct but local throw sign error mirror = MockMirror{ Resources: map[string]string{ // these files will expire after 2120 "/timestamp.json": `{"signatures":[{"keyid":"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd","sig":"Mo/o68zmRCpu5gHB9uEyVVtf5Cz432F6dd/jkKVDQXw3vG4ftnOiIRF590OfE79VFQ3BXDgMUmfD7sdkcU5Gc4HBUqKWt3vI1tFsEFfxZDQSA/upONirknxtKKZtrjDkk/rjD7cLE3S/Stul+ho0LwwlFncUZmdYaaXBeP9YileekUR15S+XvIInNO5YK+vy2EqTjMQdYTMabhZHxvP35MUMnphXuBV0aLjuvkaksF2V0mXFGcB9GUHzmgGuW32VoF2G6UBiRYaSu0r1LjTqS2auL/TciLZmi95KAAlCz3f8SOpkQ0z5bZiSk8SFCbPN1tb97RSaFJQ1jnEmxZULZA=="}],"signed":{"_type":"timestamp","expires":"2120-08-01T14:47:48+08:00","meta":{"/snapshot.json":{"hashes":{"sha256":"acb8b69c0335c6b4f3b0315dc58bdf774fca558c050dd16ae4459e3a82d39261"},"length":1760}},"spec_version":"0.1.0","version":99999}}`, "/420.index.json": `{"signatures":[{"keyid":"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61","sig":"MFVJxmU1ErcobtRcssvQiHQLTAVyCVieVpSNlgIAv6FSGN8utFpKuV3RKgelgJskt2j/gJ6k+ITpUqd1Y9iiff44N9u9oov0CBR7gYoSfSum8yEqI0AeJKePUu40959xe/1WU881npF50+LE8ovk0MC8mXzNDoe4kHWFZBve9s+VbPS1KwD2jdnf9sR95rJLF8vxMwnDwsXmdf5Y4TV9nUvQ996BRmr0YFQKBVl9DqxQ+Y2KyXjrNaZwJhKdF7I3W6fCOF4Cf5QMbZ6SG4TrPsscjscZQKJHox3iMuL6NGEem2B2lEJovrlBh0U9/cN4y6CZvEnL3uGYp7gxVvTe7A=="}],"signed":{"_type":"index","components":{"tidb":{"hidden":false,"owner":"pingcap","standalone":false,"url":"/tidb.json","yanked":false}},"default_components":[],"expires":"2121-06-23T12:05:15+08:00","owners":{"pingcap":{"keys":{"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"name":"PingCAP","threshold":1}},"spec_version":"0.1.0","version":420}}`, }, } repo = NewV1Repo(&mirror, Options{}, local) err = repo.UpdateComponentManifests() assert.Nil(t, err) } func TestFnameWithVersion(t *testing.T) { tests := []struct { name string version uint versionName string }{ {"root.json", 1, "1.root.json"}, {"/root.json", 1, "/1.root.json"}, } for _, test := range tests { fname := FnameWithVersion(test.name, test.version) assert.Equal(t, test.versionName, fname) } } func TestCheckTimestamp(t *testing.T) { mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() privk := setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{}, local) repoTimestamp := timestampManifest() // Test that no local timestamp => return changed = true mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, repoTimestamp, privk) changed, manifest, err := repo.fetchTimestamp() assert.Nil(t, err) err = local.SaveManifest(manifest, v1manifest.ManifestFilenameTimestamp) assert.Nil(t, err) assert.NotNil(t, manifest) tmp := manifest.Signed.(*v1manifest.Timestamp).SnapshotHash() hash := &tmp assert.NotNil(t, hash) assert.Equal(t, changed, true) assert.Equal(t, uint(1001), hash.Length) assert.Equal(t, "123456", hash.Hashes[v1manifest.SHA256]) assert.Contains(t, local.Saved, v1manifest.ManifestFilenameTimestamp) changed, manifest, err = repo.fetchTimestamp() assert.Nil(t, err) err = local.SaveManifest(manifest, v1manifest.ManifestFilenameTimestamp) assert.Nil(t, err) 
// TestCacheTimestamp verifies that fetchTimestamp caches its result: after a
// first successful fetch, the mirror copy can disappear and the cached
// manifest is still served, until PurgeTimestamp clears the cache.
func TestCacheTimestamp(t *testing.T) {
	mirror := MockMirror{
		Resources: map[string]string{},
	}
	local := v1manifest.NewMockManifests()
	privk := setNewRoot(t, local)

	repo := NewV1Repo(&mirror, Options{}, local)

	repoTimestamp := timestampManifest()
	mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, repoTimestamp, privk)

	// first fetch hits the mirror and reports a change
	changed, _, err := repo.fetchTimestamp()
	assert.Nil(t, err)
	assert.True(t, changed)

	delete(mirror.Resources, v1manifest.ManifestURLTimestamp)

	// second fetch is served from cache despite the missing mirror resource
	changed, _, err = repo.fetchTimestamp()
	assert.Nil(t, err)
	assert.False(t, changed)

	// after purging the cache, the missing resource becomes an error
	repo.PurgeTimestamp()
	_, _, err = repo.fetchTimestamp()
	assert.NotNil(t, err)
}

// TestUpdateLocalSnapshot drives updateLocalSnapshot through the main cases:
// stale timestamp triggers a snapshot download; unchanged snapshot is not
// re-saved; a deleted local snapshot is re-fetched; an expired snapshot fails.
func TestUpdateLocalSnapshot(t *testing.T) {
	mirror := MockMirror{
		Resources: map[string]string{},
	}
	local := v1manifest.NewMockManifests()
	privk := setNewRoot(t, local)

	repo := NewV1Repo(&mirror, Options{}, local)

	timestamp := timestampManifest()
	snapshotManifest := snapshotManifest()
	serSnap := serialize(t, snapshotManifest, privk)
	mirror.Resources[v1manifest.ManifestURLSnapshot] = serSnap
	// timestamp must reference the real hash of the serialized snapshot
	timestamp.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(serSnap)
	signedTs := serialize(t, timestamp, privk)
	mirror.Resources[v1manifest.ManifestURLTimestamp] = signedTs

	ts := &v1manifest.Manifest{Signed: &v1manifest.Timestamp{}}
	err := cjson.Unmarshal([]byte(signedTs), ts)
	assert.Nil(t, err)
	local.Manifests[v1manifest.ManifestFilenameTimestamp] = ts

	// test that out of date timestamp downloads and saves snapshot
	timestamp.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = "an old hash"
	timestamp.Version--
	snapshot, err := repo.updateLocalSnapshot()
	assert.Nil(t, err)
	assert.NotNil(t, snapshot)
	assert.Contains(t, local.Saved, v1manifest.ManifestFilenameSnapshot)

	// test that if snapshot unchanged, no update of snapshot is performed
	timestamp.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(serSnap)
	timestamp.Version += 2
	mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, timestamp, privk)
	local.Saved = local.Saved[:0]
	snapshot, err = repo.updateLocalSnapshot()
	assert.Nil(t, err)
	assert.NotNil(t, snapshot)
	assert.NotContains(t, local.Saved, v1manifest.ManifestFilenameSnapshot)

	// delete the local snapshot will fetch and save it again
	delete(local.Manifests, v1manifest.ManifestFilenameSnapshot)
	snapshot, err = repo.updateLocalSnapshot()
	assert.Nil(t, err)
	assert.NotNil(t, snapshot)
	assert.Contains(t, local.Saved, v1manifest.ManifestFilenameSnapshot)

	// test that invalid snapshot will causes an error
	snapshotManifest.Expires = "2000-05-11T04:51:08Z"
	mirror.Resources[v1manifest.ManifestURLSnapshot] = serialize(t, snapshotManifest, privk)
	local.Manifests[v1manifest.ManifestFilenameTimestamp] = ts
	delete(local.Manifests, v1manifest.ManifestFilenameSnapshot)
	local.Saved = local.Saved[:0]
	snapshot, err = repo.updateLocalSnapshot()
	assert.NotNil(t, err)
	assert.Nil(t, snapshot)
	assert.NotContains(t, local.Saved, v1manifest.ManifestFilenameSnapshot)

	// TODO test an expired manifest will cause an error or force update
	// TODO test that invalid signature of snapshot causes an error
	// TODO test that signature error on timestamp causes root to be reloaded and timestamp to be rechecked
}

// TestUpdateLocalRoot checks root rotation: a new root with a wrong version or
// an expired timestamp is rejected; a correctly versioned, unexpired root
// signed by both old and new keys is accepted.
func TestUpdateLocalRoot(t *testing.T) {
	mirror := MockMirror{
		Resources: map[string]string{},
	}

	local := v1manifest.NewMockManifests()
	privKey := setNewRoot(t, local)

	repo := NewV1Repo(&mirror, Options{}, local)

	// Should success if no new version root.
	err := repo.updateLocalRoot()
	assert.Nil(t, err)

	root2, privKey2 := rootManifest(t)
	root, _ := repo.loadRoot()
	fname := fmt.Sprintf("/%d.root.json", root.Version+1)
	// sign with both the old and the new key for the rotation
	mirror.Resources[fname] = serialize(t, root2, privKey, privKey2)

	// Fail cause wrong version
	err = repo.updateLocalRoot()
	assert.NotNil(t, err)

	// Fix Version but the new root expired.
	root2.Version = root.Version + 1
	root2.Expires = "2000-05-11T04:51:08Z"
	mirror.Resources[fname] = serialize(t, root2, privKey, privKey2)
	err = repo.updateLocalRoot()
	assert.NotNil(t, err)

	// Fix Expires, should success now.
	root2.Expires = "2222-05-11T04:51:08Z"
	mirror.Resources[fname] = serialize(t, root2, privKey, privKey2)
	err = repo.updateLocalRoot()
	assert.Nil(t, err)
}
mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() priv := setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{}, local) index, _ := indexManifest(t) snapshot := snapshotManifest() serIndex := serialize(t, index, priv) mirror.Resources["/5.index.json"] = serIndex local.Manifests[v1manifest.ManifestFilenameSnapshot] = &v1manifest.Manifest{Signed: snapshot} index.Version-- local.Manifests[v1manifest.ManifestFilenameIndex] = &v1manifest.Manifest{Signed: index} err := repo.updateLocalIndex(snapshot) assert.Nil(t, err) assert.Contains(t, local.Saved, "index.json") // TODO test that invalid signature of snapshot causes an error } func TestYanked(t *testing.T) { mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() _ = setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{}, local) index, indexPriv := indexManifest(t) snapshot := snapshotManifest() bar := componentManifest() serBar := serialize(t, bar, indexPriv) mirror.Resources["/7.bar.json"] = serBar local.Manifests[v1manifest.ManifestFilenameSnapshot] = &v1manifest.Manifest{Signed: snapshot} local.Manifests[v1manifest.ManifestFilenameIndex] = &v1manifest.Manifest{Signed: index} // Test yanked version updated, err := repo.updateComponentManifest("bar", true) assert.Nil(t, err) _, ok := updated.VersionList("plat/form")["v2.0.3"] assert.False(t, ok) _, err = repo.updateComponentManifest("bar", false) assert.NotNil(t, err) assert.Equal(t, errors.Cause(err), ErrUnknownComponent) } func TestUpdateComponent(t *testing.T) { mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() _ = setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{}, local) index, indexPriv := indexManifest(t) snapshot := snapshotManifest() foo := componentManifest() serFoo := serialize(t, foo, indexPriv) mirror.Resources["/7.foo.json"] = serFoo local.Manifests[v1manifest.ManifestFilenameSnapshot] = 
&v1manifest.Manifest{Signed: snapshot} local.Manifests[v1manifest.ManifestFilenameIndex] = &v1manifest.Manifest{Signed: index} // Test happy path updated, err := repo.updateComponentManifest("foo", false) assert.Nil(t, err) t.Logf("%+v", err) assert.NotNil(t, updated) assert.Contains(t, local.Saved, "foo.json") // Test that decrementing version numbers cause an error oldFoo := componentManifest() oldFoo.Version = 8 local.Manifests["foo.json"] = &v1manifest.Manifest{Signed: oldFoo} local.Saved = []string{} updated, err = repo.updateComponentManifest("foo", false) assert.NotNil(t, err) assert.Nil(t, updated) assert.Empty(t, local.Saved) // Test that id missing from index causes an error updated, err = repo.updateComponentManifest("bar", false) assert.NotNil(t, err) assert.Nil(t, updated) assert.Empty(t, local.Saved) // TODO check that the correct files were created // TODO test that invalid signature of component manifest causes an error } func TestEnsureManifests(t *testing.T) { mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() priv := setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{}, local) index, _ := indexManifest(t) snapshot := snapshotManifest() snapStr := serialize(t, snapshot, priv) ts := timestampManifest() ts.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(snapStr) indexURL, _, _ := snapshot.VersionedURL(v1manifest.ManifestURLIndex) mirror.Resources[indexURL] = serialize(t, index, priv) mirror.Resources[v1manifest.ManifestURLSnapshot] = snapStr mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, ts, priv) // Initial repo err := repo.ensureManifests() assert.Nil(t, err) assert.Contains(t, local.Saved, v1manifest.ManifestFilenameIndex) assert.Contains(t, local.Saved, v1manifest.ManifestFilenameSnapshot) assert.Contains(t, local.Saved, v1manifest.ManifestFilenameTimestamp) assert.NotContains(t, local.Saved, v1manifest.ManifestFilenameRoot) // Happy update root2, priv2 := 
rootManifest(t) // generate new root key root, _ := repo.loadRoot() root2.Version = root.Version + 1 mirror.Resources["/43.root.json"] = serialize(t, root2, priv, priv2) rootMeta := snapshot.Meta[v1manifest.ManifestURLRoot] rootMeta.Version = root2.Version snapshot.Meta[v1manifest.ManifestURLRoot] = rootMeta snapStr = serialize(t, snapshot, priv2) // sign snapshot with new key ts.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(snapStr) ts.Version++ mirror.Resources[v1manifest.ManifestURLSnapshot] = snapStr mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, ts, priv2) local.Saved = []string{} repo.PurgeTimestamp() err = repo.ensureManifests() assert.Nil(t, err) assert.Contains(t, local.Saved, v1manifest.ManifestFilenameRoot) // Sad path - root and snapshot disagree on version rootMeta.Version = 500 snapshot.Meta[v1manifest.ManifestURLRoot] = rootMeta snapStr = serialize(t, snapshot, priv) ts.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(snapStr) ts.Version++ mirror.Resources[v1manifest.ManifestURLSnapshot] = snapStr mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, ts, priv) repo.PurgeTimestamp() err = repo.ensureManifests() assert.NotNil(t, err) } func TestLatestStableVersion(t *testing.T) { mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() priv := setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{GOOS: "plat", GOARCH: "form"}, local) index, indexPriv := indexManifest(t) snapshot := snapshotManifest() snapStr := serialize(t, snapshot, priv) ts := timestampManifest() ts.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(snapStr) // v2.0.1: unyanked // v2.0.3: yanked // v3.0.0-rc: unyanked foo := componentManifest() indexURL, _, _ := snapshot.VersionedURL(v1manifest.ManifestURLIndex) mirror.Resources[indexURL] = serialize(t, index, priv) mirror.Resources[v1manifest.ManifestURLSnapshot] = snapStr 
mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, ts, priv) mirror.Resources["/7.foo.json"] = serialize(t, foo, indexPriv) v, _, err := repo.LatestStableVersion("foo", false) assert.Nil(t, err) assert.Equal(t, "v2.0.1", v.String()) v, _, err = repo.LatestStableVersion("foo", true) assert.Nil(t, err) assert.Equal(t, "v2.0.3", v.String()) } func TestLatestStableVersionWithPrerelease(t *testing.T) { mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() priv := setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{GOOS: "plat", GOARCH: "form"}, local) index, indexPriv := indexManifest(t) snapshot := snapshotManifest() snapStr := serialize(t, snapshot, priv) ts := timestampManifest() ts.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(snapStr) foo := componentManifest() // v2.0.1: yanked // v2.0.3: yanked // v3.0.0-rc: unyanked item := foo.Platforms["plat/form"]["v2.0.1"] item.Yanked = true foo.Platforms["plat/form"]["v2.0.1"] = item indexURL, _, _ := snapshot.VersionedURL(v1manifest.ManifestURLIndex) mirror.Resources[indexURL] = serialize(t, index, priv) mirror.Resources[v1manifest.ManifestURLSnapshot] = snapStr mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, ts, priv) mirror.Resources["/7.foo.json"] = serialize(t, foo, indexPriv) v, _, err := repo.LatestStableVersion("foo", false) assert.Nil(t, err) assert.Equal(t, "v3.0.0-rc", v.String()) } func TestUpdateComponents(t *testing.T) { t1 := t.TempDir() t2 := t.TempDir() mirror := MockMirror{ Resources: map[string]string{}, } local := v1manifest.NewMockManifests() local.RootDir = t1 priv := setNewRoot(t, local) repo := NewV1Repo(&mirror, Options{GOOS: "plat", GOARCH: "form"}, local) index, indexPriv := indexManifest(t) snapshot := snapshotManifest() snapStr := serialize(t, snapshot, priv) ts := timestampManifest() ts.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(snapStr) foo := componentManifest() 
indexURL, _, _ := snapshot.VersionedURL(v1manifest.ManifestURLIndex) mirror.Resources[indexURL] = serialize(t, index, priv) mirror.Resources[v1manifest.ManifestURLSnapshot] = snapStr mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, ts, priv) mirror.Resources["/7.foo.json"] = serialize(t, foo, indexPriv) mirror.Resources["/foo-2.0.1.tar.gz"] = "foo201" mirror.Resources["/foo-3.0.0-rc.tar.gz"] = "foo300rc" // Install err := repo.UpdateComponents([]ComponentSpec{{ ID: "foo", }}) assert.Nil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.1", local.Installed["foo"].Version) assert.Equal(t, "foo201", local.Installed["foo"].Contents) assert.Equal(t, filepath.Join(t1, "components/foo/v2.0.1/foo-2.0.1.tar.gz"), local.Installed["foo"].BinaryPath) // Update foo.Version = 8 foo.Platforms["plat/form"]["v2.0.2"] = versionItem2() mirror.Resources["/8.foo.json"] = serialize(t, foo, indexPriv) mirror.Resources["/foo-2.0.2.tar.gz"] = "foo202" snapshot.Version++ item := snapshot.Meta["/foo.json"] item.Version = 8 snapshot.Meta["/foo.json"] = item snapStr = serialize(t, snapshot, priv) ts.Meta[v1manifest.ManifestURLSnapshot].Hashes[v1manifest.SHA256] = hash(snapStr) ts.Version++ mirror.Resources[v1manifest.ManifestURLSnapshot] = snapStr mirror.Resources[v1manifest.ManifestURLTimestamp] = serialize(t, ts, priv) repo.PurgeTimestamp() err = repo.UpdateComponents([]ComponentSpec{{ ID: "foo", TargetDir: t2, }}) assert.Nil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.2", local.Installed["foo"].Version) assert.Equal(t, "foo202", local.Installed["foo"].Contents) assert.Equal(t, filepath.Join(t2, "foo-2.0.2.tar.gz"), local.Installed["foo"].BinaryPath) // Update; already up to date err = repo.UpdateComponents([]ComponentSpec{{ ID: "foo", }}) assert.Nil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.2", local.Installed["foo"].Version) assert.Equal(t, "foo202", local.Installed["foo"].Contents) // Specific 
version err = repo.UpdateComponents([]ComponentSpec{{ ID: "foo", Version: "v2.0.1", }}) assert.Nil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.1", local.Installed["foo"].Version) assert.Equal(t, "foo201", local.Installed["foo"].Contents) // Sad paths // Component doesn't exists err = repo.UpdateComponents([]ComponentSpec{{ ID: "bar", }}) assert.Nil(t, err) _, ok := local.Installed["bar"] assert.False(t, ok) // Specific version doesn't exist err = repo.UpdateComponents([]ComponentSpec{{ ID: "foo", Version: "v2.0.3", }}) assert.NotNil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.1", local.Installed["foo"].Version) assert.Equal(t, "foo201", local.Installed["foo"].Contents) // Platform not supported repo.GOARCH = "sdfsadfsadf" err = repo.UpdateComponents([]ComponentSpec{{ ID: "foo", }}) assert.NotNil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.1", local.Installed["foo"].Version) assert.Equal(t, "foo201", local.Installed["foo"].Contents) // Already installed repo.GOARCH = "form" err = repo.UpdateComponents([]ComponentSpec{{ ID: "foo", Version: "v2.0.1", }}) assert.Nil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.1", local.Installed["foo"].Version) assert.Equal(t, "foo201", local.Installed["foo"].Contents) // Test that even after one error, other components are handled repo.GOARCH = "form" err = repo.UpdateComponents([]ComponentSpec{{ ID: "bar", }, { ID: "foo", }}) assert.Nil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v2.0.2", local.Installed["foo"].Version) assert.Equal(t, "foo202", local.Installed["foo"].Contents) // Install preprelease version // Specific version err = repo.UpdateComponents([]ComponentSpec{{ ID: "foo", Version: "v3.0.0-rc", }}) assert.Nil(t, err) assert.Equal(t, 1, len(local.Installed)) assert.Equal(t, "v3.0.0-rc", local.Installed["foo"].Version) assert.Equal(t, "foo300rc", local.Installed["foo"].Contents) } func 
timestampManifest() *v1manifest.Timestamp { return &v1manifest.Timestamp{ SignedBase: v1manifest.SignedBase{ Ty: v1manifest.ManifestTypeTimestamp, SpecVersion: "0.1.0", Expires: "2220-05-11T04:51:08Z", Version: 42, }, Meta: map[string]v1manifest.FileHash{v1manifest.ManifestURLSnapshot: { Hashes: map[string]string{v1manifest.SHA256: "123456"}, Length: 1001, }}, } } func snapshotManifest() *v1manifest.Snapshot { return &v1manifest.Snapshot{ SignedBase: v1manifest.SignedBase{ Ty: v1manifest.ManifestTypeSnapshot, SpecVersion: "0.1.0", Expires: "2220-05-11T04:51:08Z", Version: 42, }, Meta: map[string]v1manifest.FileVersion{ v1manifest.ManifestURLRoot: {Version: 42}, v1manifest.ManifestURLIndex: {Version: 5}, "/foo.json": {Version: 7}, "/bar.json": {Version: 7}, }, } } func componentManifest() *v1manifest.Component { return &v1manifest.Component{ SignedBase: v1manifest.SignedBase{ Ty: v1manifest.ManifestTypeComponent, SpecVersion: "0.1.0", Expires: "2220-05-11T04:51:08Z", Version: 7, }, ID: "foo", Description: "foo does stuff", Platforms: map[string]map[string]v1manifest.VersionItem{ "plat/form": { "v2.0.1": versionItem(), "v2.0.3": versionItem3(), "v3.0.0-rc": versionItemPrerelease(), }, }, } } func versionItemPrerelease() v1manifest.VersionItem { return v1manifest.VersionItem{ URL: "/foo-3.0.0-rc.tar.gz", Entry: "dummy", FileHash: v1manifest.FileHash{ Hashes: map[string]string{v1manifest.SHA256: "0cd2f56431d966c8897c87193539aabb3ffb34b1c55aad4b8a03dd6421cec5aa"}, Length: 28, }, } } func versionItem() v1manifest.VersionItem { return v1manifest.VersionItem{ URL: "/foo-2.0.1.tar.gz", Entry: "dummy", FileHash: v1manifest.FileHash{ Hashes: map[string]string{v1manifest.SHA256: "8dc7102c0d675dfa53da273317b9f627e96ed24efeecc8c5ebd00dc06f4e09c3"}, Length: 28, }, } } func versionItem2() v1manifest.VersionItem { return v1manifest.VersionItem{ URL: "/foo-2.0.2.tar.gz", Entry: "dummy", FileHash: v1manifest.FileHash{ Hashes: map[string]string{v1manifest.SHA256: 
"5abe91bc22039c15c05580062357be7ab0bfd7968582a118fbb4eb817ddc2e76"}, Length: 12, }, } } func versionItem3() v1manifest.VersionItem { return v1manifest.VersionItem{ URL: "/foo-2.0.3.tar.gz", Entry: "dummy", Yanked: true, FileHash: v1manifest.FileHash{ Hashes: map[string]string{v1manifest.SHA256: "5abe91bc22039c15c05580062357be7ab0bfd7968582a118fbb4eb817ddc2e76"}, Length: 12, }, } } func indexManifest(t *testing.T) (*v1manifest.Index, crypto.PrivKey) { info, keyID, priv, err := v1manifest.FreshKeyInfo() assert.Nil(t, err) bytes, err := priv.Serialize() assert.Nil(t, err) privKeyInfo := v1manifest.NewKeyInfo(bytes) // The signed id will be priveID and it should be equal as keyID privID, err := privKeyInfo.ID() assert.Nil(t, err) assert.Equal(t, keyID, privID) return &v1manifest.Index{ SignedBase: v1manifest.SignedBase{ Ty: v1manifest.ManifestTypeIndex, SpecVersion: "0.1.0", Expires: "2220-05-11T04:51:08Z", Version: 5, }, Owners: map[string]v1manifest.Owner{"bar": { Name: "Bar", Keys: map[string]*v1manifest.KeyInfo{keyID: info}, Threshold: 1, }}, Components: map[string]v1manifest.ComponentItem{ "foo": { Yanked: false, Owner: "bar", URL: "/foo.json", }, "bar": { Yanked: true, Owner: "bar", URL: "/bar.json", }, }, DefaultComponents: []string{}, }, priv } func rootManifest(t *testing.T) (*v1manifest.Root, crypto.PrivKey) { info, keyID, priv, err := v1manifest.FreshKeyInfo() assert.Nil(t, err) id, err := info.ID() assert.Nil(t, err) bytes, err := priv.Serialize() assert.Nil(t, err) privKeyInfo := v1manifest.NewKeyInfo(bytes) // The signed id will be priveID and it should be equal as keyID privID, err := privKeyInfo.ID() assert.Nil(t, err) assert.Equal(t, keyID, privID) t.Log("keyID: ", keyID) t.Log("id: ", id) t.Log("privKeyInfo id: ", privID) // t.Logf("info: %+v\n", info) // t.Logf("pinfo: %+v\n", privKeyInfo) return &v1manifest.Root{ SignedBase: v1manifest.SignedBase{ Ty: v1manifest.ManifestTypeRoot, SpecVersion: "0.1.0", Expires: "2220-05-11T04:51:08Z", Version: 42, }, 
Roles: map[string]*v1manifest.Role{ v1manifest.ManifestTypeIndex: { URL: v1manifest.ManifestURLIndex, Keys: map[string]*v1manifest.KeyInfo{keyID: info}, Threshold: 1, }, v1manifest.ManifestTypeRoot: { URL: v1manifest.ManifestURLRoot, Keys: map[string]*v1manifest.KeyInfo{keyID: info}, Threshold: 1, }, v1manifest.ManifestTypeTimestamp: { URL: v1manifest.ManifestURLTimestamp, Keys: map[string]*v1manifest.KeyInfo{keyID: info}, Threshold: 1, }, v1manifest.ManifestTypeSnapshot: { URL: v1manifest.ManifestURLSnapshot, Keys: map[string]*v1manifest.KeyInfo{keyID: info}, Threshold: 1, }, }, }, priv } func setNewRoot(t *testing.T, local *v1manifest.MockManifests) crypto.PrivKey { root, privk := rootManifest(t) setRoot(local, root) return privk } func setRoot(local *v1manifest.MockManifests, root *v1manifest.Root) { local.Manifests[v1manifest.ManifestFilenameRoot] = &v1manifest.Manifest{Signed: root} for r, ks := range root.Roles { _ = local.Ks.AddKeys(r, 1, "2220-05-11T04:51:08Z", ks.Keys) } } func serialize(t *testing.T, role v1manifest.ValidManifest, privKeys ...crypto.PrivKey) string { var keyInfos []*v1manifest.KeyInfo var priv crypto.PrivKey if len(privKeys) > 0 { for _, priv := range privKeys { bytes, err := priv.Serialize() assert.Nil(t, err) keyInfo := v1manifest.NewKeyInfo(bytes) keyInfos = append(keyInfos, keyInfo) } } else { // just use a generate one var err error priv, err = crypto.NewKeyPair(crypto.KeyTypeRSA, crypto.KeySchemeRSASSAPSSSHA256) assert.Nil(t, err) bytes, err := priv.Serialize() assert.Nil(t, err) keyInfo := v1manifest.NewKeyInfo(bytes) keyInfos = append(keyInfos, keyInfo) } var out strings.Builder err := v1manifest.SignAndWrite(&out, role, keyInfos...) 
assert.Nil(t, err) return out.String() } func hash(s string) string { shaWriter := sha256.New() if _, err := io.Copy(shaWriter, strings.NewReader(s)); err != nil { panic(err) } return hex.EncodeToString(shaWriter.Sum(nil)) } tiup-1.16.3/pkg/repository/v1manifest/000077500000000000000000000000001505422223000176335ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/v1manifest/key_store.go000066400000000000000000000104131505422223000221650ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package v1manifest import ( "fmt" "sync" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/crypto" ) // KeyStore tracks roles, keys, etc. and verifies signatures against this metadata. (map[string]roleKeys) type KeyStore struct { sync.Map } type roleKeys struct { threshold uint expiry string // key id -> public key (map[string]crypto.PubKey) keys *sync.Map } // NewKeyStore return a KeyStore func NewKeyStore() *KeyStore { return &KeyStore{} } // AddKeys clears all keys for role, then adds all supplied keys and stores the threshold value. 
func (s *KeyStore) AddKeys(role string, threshold uint, expiry string, keys map[string]*KeyInfo) error { if threshold == 0 { return errors.Errorf("invalid threshold (0)") } rk := roleKeys{threshold: threshold, expiry: expiry, keys: &sync.Map{}} for id, info := range keys { pub, err := info.publicKey() if err != nil { return err } rk.keys.Store(id, pub) } s.Store(role, rk) return nil } func newSignatureError(fname string, err error) *SignatureError { return &SignatureError{ fname: fname, err: err, } } // IsSignatureError check if the err is SignatureError. func IsSignatureError(err error) bool { _, ok := err.(*SignatureError) return ok } // SignatureError the signature of a file is incorrect. type SignatureError struct { fname string err error } func (s *SignatureError) Error() string { return fmt.Sprintf("invalid signature for file %s: %s", s.fname, s.err.Error()) } // transitionRoot checks that signed is verified by signatures using newThreshold, and if so, updates the keys for the root // role in the key store. func (s *KeyStore) transitionRoot(signed []byte, newThreshold uint, expiry string, signatures []Signature, newKeys map[string]*KeyInfo) error { if s == nil { return nil } oldKeys, hasOldKeys := s.Load(ManifestTypeRoot) err := s.AddKeys(ManifestTypeRoot, newThreshold, expiry, newKeys) if err != nil { return err } err = s.verifySignature(signed, ManifestTypeRoot, signatures, ManifestFilenameRoot) if err != nil { // Restore the old root keys. if hasOldKeys { s.Store(ManifestTypeRoot, oldKeys) } return err } return nil } // verifySignature verifies all supplied signatures are correct for signed. Also, that there are at least threshold signatures, // and that they all belong to the correct role and are correct for signed. It is permissible for signature keys to not // exist (they will be ignored, and not count towards the threshold) but not for a signature to be incorrect. 
func (s *KeyStore) verifySignature(signed []byte, role string, signatures []Signature, filename string) error { if s == nil { return nil } // Check for duplicate signatures. has := make(map[string]struct{}) for _, sig := range signatures { if _, ok := has[sig.KeyID]; ok { return newSignatureError(filename, errors.Errorf("signature section of %s contains duplicate signatures", filename)) } has[sig.KeyID] = struct{}{} } ks, ok := s.Load(role) if !ok { return errors.Errorf("Unknown role %s", role) } keys := ks.(roleKeys) var validSigs uint for _, sig := range signatures { key, ok := keys.keys.Load(sig.KeyID) if !ok { continue } err := key.(crypto.PubKey).VerifySignature(signed, sig.Sig) if err != nil { return newSignatureError(filename, err) } validSigs++ } // We may need to verify the root manifest with old keys. Once the most up to date root is found and verified, then // the keys used to do so should be checked for expiry. if role != ManifestTypeRoot { if err := CheckExpiry(filename, keys.expiry); err != nil { return err } } if validSigs < keys.threshold { return newSignatureError(filename, errors.Errorf("not enough signatures (%v) for threshold %v in %s", validSigs, keys.threshold, filename)) } return nil } tiup-1.16.3/pkg/repository/v1manifest/keys.go000066400000000000000000000075141505422223000211440ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package v1manifest import ( "crypto/sha256" "fmt" cjson "github.com/gibson042/canonicaljson-go" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/crypto" ) // ShortKeyIDLength is the number of bytes used for filenames const ShortKeyIDLength = 16 // ErrorNotPrivateKey indicate that it need a private key, but the supplied is not. var ErrorNotPrivateKey = errors.New("not a private key") // NewKeyInfo make KeyInfo from private key, public key should be load from json func NewKeyInfo(privKey []byte) *KeyInfo { // TODO: support other key type and scheme return &KeyInfo{ Type: crypto.KeyTypeRSA, Scheme: crypto.KeySchemeRSASSAPSSSHA256, Value: map[string]string{ "private": string(privKey), }, } } // GenKeyInfo generate a new private KeyInfo func GenKeyInfo() (*KeyInfo, error) { // TODO: support other key type and scheme priv, err := crypto.NewKeyPair(crypto.KeyTypeRSA, crypto.KeySchemeRSASSAPSSSHA256) if err != nil { return nil, err } bytes, err := priv.Serialize() if err != nil { return nil, err } return NewKeyInfo(bytes), nil } // ID returns the hash id of the key func (ki *KeyInfo) ID() (string, error) { // Make sure private key and correspond public key has the same id pk, err := ki.publicKey() if err != nil { return "", err } data, err := pk.Serialize() if err != nil { return "", err } value := map[string]string{ "public": string(data), } payload, err := cjson.Marshal(KeyInfo{ Type: ki.Type, Scheme: ki.Scheme, Value: value, }) if err != nil { // XXX: maybe we can assume that the error should always be nil since the KeyInfo struct is valid return "", err } sum := sha256.Sum256(payload) return fmt.Sprintf("%x", sum), nil } // IsPrivate detect if this is a private key func (ki *KeyInfo) IsPrivate() bool { return len(ki.Value["private"]) > 0 } // Signature sign a signature with the key for payload func (ki *KeyInfo) Signature(payload []byte) (string, error) { pk, err := ki.privateKey() if err != nil { return "", err } return pk.Signature(payload) } // SignManifest 
wrap Signature with the param manifest func (ki *KeyInfo) SignManifest(m ValidManifest) (string, error) { payload, err := cjson.Marshal(m) if err != nil { return "", errors.Annotate(err, "marshal for signature") } return ki.Signature(payload) } // Verify check the signature is right func (ki *KeyInfo) Verify(payload []byte, sig string) error { pk, err := ki.publicKey() if err != nil { return err } return pk.VerifySignature(payload, sig) } // Public returns the public keyInfo func (ki *KeyInfo) Public() (*KeyInfo, error) { pk, err := ki.publicKey() if err != nil { return nil, err } bytes, err := pk.Serialize() if err != nil { return nil, err } return &KeyInfo{ Type: pk.Type(), Scheme: pk.Scheme(), Value: map[string]string{ "public": string(bytes), }, }, nil } // publicKey returns PubKey func (ki *KeyInfo) publicKey() (crypto.PubKey, error) { if ki.IsPrivate() { priv, err := ki.privateKey() if err != nil { return nil, err } return priv.Public(), nil } return crypto.NewPubKey(ki.Type, ki.Scheme, []byte(ki.Value["public"])) } // privateKey returns PrivKey func (ki *KeyInfo) privateKey() (crypto.PrivKey, error) { if !ki.IsPrivate() { return nil, ErrorNotPrivateKey } return crypto.NewPrivKey(ki.Type, ki.Scheme, []byte(ki.Value["private"])) } tiup-1.16.3/pkg/repository/v1manifest/keys_test.go000066400000000000000000000054051505422223000222000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
//go:debug rsa1024min=0 package v1manifest import ( "testing" "github.com/stretchr/testify/require" ) var ( publicTestKey = []byte(`-----BEGIN PUBLIC KEY----- MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALqbHeRLCyOdykC5SDLqI49ArYGYG1mq aH9/GnWjGavZM02fos4lc2w6tCchcUBNtJvGqKwhC5JEnx3RYoSX2ucCAwEAAQ== -----END PUBLIC KEY----- `) privateTestKey = []byte(` -----BEGIN RSA PRIVATE KEY----- MIIBPQIBAAJBALqbHeRLCyOdykC5SDLqI49ArYGYG1mqaH9/GnWjGavZM02fos4l c2w6tCchcUBNtJvGqKwhC5JEnx3RYoSX2ucCAwEAAQJBAKn6O+tFFDt4MtBsNcDz GDsYDjQbCubNW+yvKbn4PJ0UZoEebwmvH1ouKaUuacJcsiQkKzTHleu4krYGUGO1 mEECIQD0dUhj71vb1rN1pmTOhQOGB9GN1mygcxaIFOWW8znLRwIhAMNqlfLijUs6 rY+h1pJa/3Fh1HTSOCCCCWA0NRFnMANhAiEAwddKGqxPO6goz26s2rHQlHQYr47K vgPkZu2jDCo7trsCIQC/PSfRsnSkEqCX18GtKPCjfSH10WSsK5YRWAY3KcyLAQIh AL70wdUu5jMm2ex5cZGkZLRB50yE6rBiHCd5W1WdTFoe -----END RSA PRIVATE KEY----- `) ) var cryptoCases = [][]byte{ []byte(`TiDB is an awesome database`), []byte(`I like coding...`), []byte(`I hate talking...`), []byte(`Junk food is good`), } func TestKeyInfoIdentity(t *testing.T) { priv := NewKeyInfo(privateTestKey) require.True(t, priv.IsPrivate()) pub1, err := priv.Public() require.Nil(t, err) pub2, err := priv.Public() require.Nil(t, err) pub3, err := pub2.Public() require.Nil(t, err) require.Equal(t, pub1.Value["public"], pub2.Value["public"]) require.Equal(t, pub1.Value["public"], pub3.Value["public"]) require.Equal(t, pub1.Value["public"], string(publicTestKey)) id1, err := pub1.ID() require.Nil(t, err) id2, err := pub2.ID() require.Nil(t, err) id3, err := pub3.ID() require.Nil(t, err) require.Equal(t, id1, id2) require.Equal(t, id1, id3) } func TestKeyInfoID(t *testing.T) { priv := NewKeyInfo(privateTestKey) require.True(t, priv.IsPrivate()) pub, err := priv.Public() require.Nil(t, err) require.True(t, !pub.IsPrivate()) pubid, err := pub.ID() require.Nil(t, err) privid, err := pub.ID() require.Nil(t, err) require.NotEmpty(t, pubid) require.Equal(t, pubid, privid) } func TestKeyInfoSigAndVerify(t *testing.T) { pri 
:= NewKeyInfo(privateTestKey) require.True(t, pri.IsPrivate()) pub, err := pri.Public() require.Nil(t, err) for _, cas := range cryptoCases { sig, err := pri.Signature(cas) require.Nil(t, err) require.Nil(t, pub.Verify(cas, sig)) } } tiup-1.16.3/pkg/repository/v1manifest/local_manifests.go000066400000000000000000000273641505422223000233410ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package v1manifest import ( "encoding/json" "fmt" "io" "os" "path/filepath" "strings" "sync" cjson "github.com/gibson042/canonicaljson-go" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/utils" ) // LocalManifests methods for accessing a store of manifests. type LocalManifests interface { // SaveManifest saves a manifest to disk, it will overwrite filename if it exists. SaveManifest(manifest *Manifest, filename string) error // SaveComponentManifest saves a component manifest to disk, it will overwrite filename if it exists. SaveComponentManifest(manifest *Manifest, filename string) error // LoadManifest loads and validates the most recent manifest of role's type. The returned bool is true if the file // exists. LoadManifest(role ValidManifest) (*Manifest, bool, error) // LoadComponentManifest loads and validates the most recent manifest at filename. LoadComponentManifest(item *ComponentItem, filename string) (*Component, error) // ComponentInstalled is true if the version of component is present locally. 
ComponentInstalled(component, version string) (bool, error) // InstallComponent installs the component from the reader. InstallComponent(reader io.Reader, targetDir, component, version, filename string, noExpand bool) error // Return the local key store. KeyStore() *KeyStore // ManifestVersion opens filename, if it exists and is a manifest, returns its manifest version number. Otherwise // returns 0. ManifestVersion(filename string) uint // TargetRootDir returns the root directory of target TargetRootDir() string } // FsManifests represents a collection of v1 manifests on disk. // Invariant: any manifest written to disk should be valid, but may have expired. (It is also possible the manifest was // ok when written and has expired since). type FsManifests struct { profile *localdata.Profile keys *KeyStore cache sync.Map // map[string]string } // FIXME implement garbage collection of old manifests // NewManifests creates a new FsManifests with local store at root. // There must exist a trusted root.json. func NewManifests(profile *localdata.Profile) (*FsManifests, error) { result := &FsManifests{profile: profile, keys: NewKeyStore()} // Load the root manifest. manifest, err := result.load(ManifestFilenameRoot) if err != nil { return nil, err } // We must load without validation because we have no keys yet. var root Root _, err = ReadNoVerify(strings.NewReader(manifest), &root) if err != nil { return nil, err } // Populate our key store from the root manifest. err = loadKeys(&root, result.keys) if err != nil { return nil, err } // Now that we've bootstrapped the key store, we can verify the root manifest we loaded earlier. _, err = ReadManifest(strings.NewReader(manifest), &root, result.keys) if err != nil { return nil, err } result.cache.Store(ManifestFilenameRoot, manifest) return result, nil } // SaveManifest implements LocalManifests. 
func (ms *FsManifests) SaveManifest(manifest *Manifest, filename string) error { err := ms.save(manifest, filename) if err != nil { return err } return loadKeys(manifest.Signed, ms.keys) } // SaveComponentManifest implements LocalManifests. func (ms *FsManifests) SaveComponentManifest(manifest *Manifest, filename string) error { return ms.save(manifest, filename) } func (ms *FsManifests) save(manifest *Manifest, filename string) error { bytes, err := cjson.Marshal(manifest) if err != nil { return err } // Save all manifests in `$TIUP_HOME/manifests` path := filepath.Join(ms.profile.Root(), localdata.ManifestParentDir, filename) // create sub directory if needed if err := utils.MkdirAll(filepath.Dir(path), 0755); err != nil { return errors.Trace(err) } err = utils.WriteFile(path, bytes, os.ModePerm) if err != nil { return err } ms.cache.Store(filename, string(bytes)) return nil } // LoadManifest implements LocalManifests. func (ms *FsManifests) LoadManifest(role ValidManifest) (*Manifest, bool, error) { filename := role.Filename() manifest, err := ms.load(filename) if err != nil || manifest == "" { return nil, false, err } m, err := ReadManifest(strings.NewReader(manifest), role, ms.keys) if err != nil { // We can think that there is no such file if it's expired or has wrong signature if IsExpirationError(errors.Cause(err)) || IsSignatureError(errors.Cause(err)) { // Maybe we can os.Remove(filename) here if IsSignatureError(errors.Cause(err)) { fmt.Printf("Warn: %s\n", err.Error()) } return nil, false, nil } return m, true, err } ms.cache.Store(filename, manifest) return m, true, loadKeys(role, ms.keys) } // LoadComponentManifest implements LocalManifests. 
func (ms *FsManifests) LoadComponentManifest(item *ComponentItem, filename string) (*Component, error) { manifest, err := ms.load(filename) if err != nil || manifest == "" { return nil, err } component := new(Component) _, err = ReadComponentManifest(strings.NewReader(manifest), component, item, ms.keys) if err != nil { // We can think that there is no such file if it's expired or has wrong signature if IsExpirationError(errors.Cause(err)) || IsSignatureError(errors.Cause(err)) { // Maybe we can os.Remove(filename) here if IsSignatureError(errors.Cause(err)) { fmt.Printf("Warn: %s\n", err.Error()) } return nil, nil } return nil, err } ms.cache.Store(filename, manifest) return component, nil } // load return the file for the manifest from disk. // The returned string is empty if the file does not exist. func (ms *FsManifests) load(filename string) (string, error) { str, cached := ms.cache.Load(filename) if cached { return str.(string), nil } fullPath := filepath.Join(ms.profile.Root(), localdata.ManifestParentDir, filename) file, err := os.Open(fullPath) if err != nil { if os.IsNotExist(err) { // Use the hardcode root.json if there is no root.json currently if filename == ManifestFilenameRoot { initRoot, err := filepath.Abs(filepath.Join(ms.profile.Root(), "bin/root.json")) if err != nil { return "", errors.Trace(err) } bytes, err := os.ReadFile(initRoot) if err != nil { return "", &LoadManifestError{ manifest: "root.json", err: err, } } return string(bytes), nil } return "", nil } return "", err } defer file.Close() builder := strings.Builder{} _, err = io.Copy(&builder, file) if err != nil { return "", errors.AddStack(err) } return builder.String(), nil } // ComponentInstalled implements LocalManifests. func (ms *FsManifests) ComponentInstalled(component, version string) (bool, error) { return ms.profile.VersionIsInstalled(component, version) } // InstallComponent implements LocalManifests. 
func (ms *FsManifests) InstallComponent(reader io.Reader, targetDir, component, version, filename string, noExpand bool) error { if !noExpand { return utils.Untar(reader, targetDir) } if err := utils.MkdirAll(targetDir, 0755); err != nil { return errors.Trace(err) } writer, err := os.OpenFile(filepath.Join(targetDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) if err != nil { return errors.Trace(err) } defer writer.Close() if _, err = io.Copy(writer, reader); err != nil { return errors.Trace(err) } return nil } // KeyStore implements LocalManifests. func (ms *FsManifests) KeyStore() *KeyStore { return ms.keys } // ManifestVersion implements LocalManifests. func (ms *FsManifests) ManifestVersion(filename string) uint { data, err := ms.load(filename) if err != nil { return 0 } var manifest RawManifest err = json.Unmarshal([]byte(data), &manifest) if err != nil { return 0 } var base SignedBase err = json.Unmarshal(manifest.Signed, &base) if err != nil { return 0 } return base.Version } // TargetRootDir implements LocalManifests. func (ms *FsManifests) TargetRootDir() string { return ms.profile.Root() } // MockManifests is a LocalManifests implementation for testing. type MockManifests struct { Manifests map[string]*Manifest Saved []string Installed map[string]MockInstalled Ks *KeyStore RootDir string } // MockInstalled is used by MockManifests to remember what was installed for a component. type MockInstalled struct { Version string Contents string BinaryPath string } // NewMockManifests creates an empty MockManifests. func NewMockManifests() *MockManifests { return &MockManifests{ Manifests: map[string]*Manifest{}, Saved: []string{}, Installed: map[string]MockInstalled{}, Ks: NewKeyStore(), } } // SaveManifest implements LocalManifests. 
func (ms *MockManifests) SaveManifest(manifest *Manifest, filename string) error { ms.Saved = append(ms.Saved, filename) ms.Manifests[filename] = manifest return loadKeys(manifest.Signed, ms.Ks) } // SaveComponentManifest implements LocalManifests. func (ms *MockManifests) SaveComponentManifest(manifest *Manifest, filename string) error { ms.Saved = append(ms.Saved, filename) ms.Manifests[filename] = manifest return nil } // LoadManifest implements LocalManifests. func (ms *MockManifests) LoadManifest(role ValidManifest) (*Manifest, bool, error) { manifest, ok := ms.Manifests[role.Filename()] if !ok { return nil, false, nil } switch role.Filename() { case ManifestFilenameRoot: ptr := role.(*Root) *ptr = *manifest.Signed.(*Root) case ManifestFilenameIndex: ptr := role.(*Index) *ptr = *manifest.Signed.(*Index) case ManifestFilenameSnapshot: ptr := role.(*Snapshot) *ptr = *manifest.Signed.(*Snapshot) case ManifestFilenameTimestamp: ptr := role.(*Timestamp) *ptr = *manifest.Signed.(*Timestamp) default: return nil, true, fmt.Errorf("unknown manifest type: %s", role.Filename()) } err := loadKeys(role, ms.Ks) if err != nil { return nil, false, err } return manifest, true, nil } // LoadComponentManifest implements LocalManifests. func (ms *MockManifests) LoadComponentManifest(item *ComponentItem, filename string) (*Component, error) { manifest, ok := ms.Manifests[filename] if !ok { return nil, nil } comp, ok := manifest.Signed.(*Component) if !ok { return nil, fmt.Errorf("manifest %s is not a component manifest", filename) } return comp, nil } // ComponentInstalled implements LocalManifests. func (ms *MockManifests) ComponentInstalled(component, version string) (bool, error) { inst, ok := ms.Installed[component] if !ok { return false, nil } return inst.Version == version, nil } // InstallComponent implements LocalManifests. 
func (ms *MockManifests) InstallComponent(reader io.Reader, targetDir string, component, version, filename string, noExpand bool) error { buf := strings.Builder{} _, err := io.Copy(&buf, reader) if err != nil { return err } ms.Installed[component] = MockInstalled{ Version: version, Contents: buf.String(), BinaryPath: filepath.Join(targetDir, filename), } return nil } // KeyStore implements LocalManifests. func (ms *MockManifests) KeyStore() *KeyStore { return ms.Ks } // TargetRootDir implements LocalManifests. func (ms *MockManifests) TargetRootDir() string { if ms.RootDir != "" { return ms.RootDir } return "/tmp/mock" } // ManifestVersion implements LocalManifests. func (ms *MockManifests) ManifestVersion(filename string) uint { manifest, ok := ms.Manifests[filename] if ok { return manifest.Signed.Base().Version } return 0 } tiup-1.16.3/pkg/repository/v1manifest/local_manifests_test.go000066400000000000000000000040131505422223000243620ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package v1manifest import ( "os" "path" "testing" "github.com/google/uuid" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/utils" "github.com/stretchr/testify/assert" ) // Create a profile directory // Contained stuff: // - index.json: with wrong signature // - snapshot.json: correct // - tidb.json: correct // - timestamp: with expired timestamp func genPollutedProfileDir() (string, error) { uid := uuid.New().String() dir := path.Join("/tmp", uid) wd, err := os.Getwd() if err != nil { return "", err } if err := utils.Copy(path.Join(wd, "testdata", "polluted"), dir); err != nil { return "", err } return dir, nil } func TestPollutedManifest(t *testing.T) { profileDir, err := genPollutedProfileDir() assert.Nil(t, err) defer os.RemoveAll(profileDir) profile := localdata.NewProfile(profileDir, &localdata.TiUPConfig{}) manifest, err := NewManifests(profile) assert.Nil(t, err) index := Index{} _, exist, err := manifest.LoadManifest(&index) assert.Nil(t, err) assert.False(t, exist) snap := Snapshot{} _, exist, err = manifest.LoadManifest(&snap) assert.Nil(t, err) assert.True(t, exist) timestamp := Timestamp{} _, exist, err = manifest.LoadManifest(×tamp) assert.Nil(t, err) assert.False(t, exist) filename := ComponentManifestFilename("tidb") tidb, err := manifest.LoadComponentManifest(&ComponentItem{ Owner: "pingcap", URL: "/tidb.json", }, filename) assert.NotNil(t, err) // Because index.json not load successfully assert.Nil(t, tidb) } tiup-1.16.3/pkg/repository/v1manifest/manifest.go000066400000000000000000000312471505422223000217770ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package v1manifest import ( "crypto/sha256" "encoding/json" "fmt" "io" "strings" "time" cjson "github.com/gibson042/canonicaljson-go" "github.com/pingcap/errors" ) // Names of manifest ManifestsConfig const ( ManifestTypeRoot = "root" ManifestTypeIndex = "index" ManifestTypeSnapshot = "snapshot" ManifestTypeTimestamp = "timestamp" ManifestTypeComponent = "component" // Manifest URLs in a repository. ManifestURLRoot = "/root.json" ManifestURLIndex = "/index.json" ManifestURLSnapshot = "/snapshot.json" ManifestURLTimestamp = "/timestamp.json" // Manifest filenames when stored locally. ManifestFilenameRoot = "root.json" ManifestFilenameIndex = "index.json" ManifestFilenameSnapshot = "snapshot.json" ManifestFilenameTimestamp = "timestamp.json" // SpecVersion of current, maybe we could expand it later CurrentSpecVersion = "0.1.0" // AnyPlatform is the ID for platform independent components AnyPlatform = "any/any" // Acceptable values for hash kinds. 
SHA256 = "sha256" SHA512 = "sha512" ) // ty is type information about a manifest type ty struct { Filename string Versioned bool Expire time.Duration Threshold uint } // ManifestsConfig for different manifest ManifestsConfig var ManifestsConfig = map[string]ty{ ManifestTypeRoot: { Filename: ManifestFilenameRoot, Versioned: true, Expire: time.Hour * 24 * 365, // 1y Threshold: 3, }, ManifestTypeIndex: { Filename: ManifestFilenameIndex, Versioned: true, Expire: time.Hour * 24 * 365, // 1y Threshold: 1, }, ManifestTypeComponent: { Filename: "", Versioned: true, Expire: time.Hour * 24 * 365 * 5, // 5y Threshold: 1, }, ManifestTypeSnapshot: { Filename: ManifestFilenameSnapshot, Versioned: false, Expire: time.Hour * 24 * 30, // 1mon, should be changed to 1d Threshold: 1, }, ManifestTypeTimestamp: { Filename: ManifestFilenameTimestamp, Versioned: false, Expire: time.Hour * 24 * 30, // 1mon, should be changed to 1d Threshold: 1, }, } var knownVersions = map[string]bool{ "0.1.0": true, } // AddSignature adds one or more signatures to the manifest func (manifest *Manifest) AddSignature(sigs []Signature) { hasSig := func(lst []Signature, s Signature) bool { for _, sig := range lst { if sig.KeyID == s.KeyID { return true } } return false } for _, sig := range sigs { if hasSig(manifest.Signatures, sig) { continue } manifest.Signatures = append(manifest.Signatures, sig) } } // ExpirationError the a manifest has expired. type ExpirationError struct { fname string date string } func (s *ExpirationError) Error() string { return fmt.Sprintf("manifest %s has expired at: %s", s.fname, s.date) } func newExpirationError(fname, date string) *ExpirationError { return &ExpirationError{ fname: fname, date: date, } } // IsExpirationError checks if the err is an ExpirationError. func IsExpirationError(err error) bool { _, ok := err.(*ExpirationError) return ok } // ComponentManifestFilename returns the expected filename for the component manifest identified by id. 
func ComponentManifestFilename(id string) string { return fmt.Sprintf("%s.json", id) } // RootManifestFilename returns the expected filename for the root manifest with the given version. func RootManifestFilename(version uint) string { return fmt.Sprintf("%v.root.json", version) } // Filename returns the unversioned name that the manifest should be saved as based on the type in s. func (s *SignedBase) Filename() string { fname := ManifestsConfig[s.Ty].Filename if fname == "" { panic("Unreachable") } return fname } // Versioned indicates whether versioned versions of a manifest are saved, e.g., 42.foo.json. func (s *SignedBase) Versioned() bool { return ManifestsConfig[s.Ty].Versioned } // CheckExpiry return not nil if it's expired. func CheckExpiry(fname, expires string) error { expiresTime, err := time.Parse(time.RFC3339, expires) if err != nil { return errors.AddStack(err) } if expiresTime.Before(time.Now()) { return newExpirationError(fname, expires) } return nil } // ExpiresAfter checks that manifest 1 expires after manifest 2 (or are equal) and returns an error otherwise. func ExpiresAfter(m1, m2 ValidManifest) error { time1, err := time.Parse(time.RFC3339, m1.Base().Expires) if err != nil { return errors.AddStack(err) } time2, err := time.Parse(time.RFC3339, m2.Base().Expires) if err != nil { return errors.AddStack(err) } if time1.Before(time2) { return fmt.Errorf("manifests have mis-ordered expiry times, expected %s >= %s", time1, time2) } return nil } // isValid checks if s is valid manifest metadata. func (s *SignedBase) isValid(filename string) error { if _, ok := ManifestsConfig[s.Ty]; !ok { return fmt.Errorf("unknown manifest type: `%s`", s.Ty) } if !knownVersions[s.SpecVersion] { return fmt.Errorf("unknown manifest version: `%s`, you might need to update TiUP", s.SpecVersion) } // When updating root, we only check the newest version is not expire. // This checking should be done by the update root flow. 
if s.Ty != ManifestTypeRoot { if err := CheckExpiry(filename, s.Expires); err != nil { return err } } return nil } func (manifest *Root) isValid() error { types := []string{ManifestTypeRoot, ManifestTypeIndex, ManifestTypeSnapshot, ManifestTypeTimestamp} for _, ty := range types { role, ok := manifest.Roles[ty] if !ok { return fmt.Errorf("root manifest is missing %s role", ty) } if uint(len(role.Keys)) < role.Threshold { return fmt.Errorf("%s role in root manifest does not have enough keys; expected: %v, found: %v", ty, role.Threshold, len(role.Keys)) } // Check all keys have valid id and known types. for id, k := range role.Keys { serInfo, err := cjson.Marshal(k) if err != nil { return err } hash := fmt.Sprintf("%x", sha256.Sum256(serInfo)) if hash != id { return fmt.Errorf("id does not match key. Expected: %s, found %s", hash, id) } if k.Type != "rsa" { return fmt.Errorf("unsupported key type %s in key %s", k.Type, id) } if k.Scheme != "rsassa-pss-sha256" { return fmt.Errorf("unsupported scheme %s in key %s", k.Scheme, id) } } } return nil } func (manifest *Index) isValid() error { // Check every component's owner exists. for k, c := range manifest.Components { if _, ok := manifest.Owners[c.Owner]; !ok { return fmt.Errorf("component %s has unknown owner %s", k, c.Owner) } } // Check every default is in component. for _, d := range manifest.DefaultComponents { if _, ok := manifest.Components[d]; !ok { return fmt.Errorf("default component %s is unknown", d) } } return nil } func (manifest *Component) isValid() error { // Nothing to do. return nil } // VersionItem returns VersionItem by platform and version func (manifest *Component) VersionItem(plat, ver string, includeYanked bool) *VersionItem { var v VersionItem var ok bool if includeYanked { v, ok = manifest.VersionListWithYanked(plat)[ver] } else { v, ok = manifest.VersionList(plat)[ver] } if !ok || v.Entry == "" { return nil } return &v } func (manifest *Snapshot) isValid() error { // Nothing to do. 
return nil } func (manifest *Timestamp) isValid() error { snapshot, ok := manifest.Meta[ManifestURLSnapshot] if !ok { return errors.New("timestamp manifest is missing entry for snapshot.json") } if len(manifest.Meta) > 1 { return errors.New("timestamp manifest has too many entries in `meta`") } if len(snapshot.Hashes) == 0 { return errors.New("timestamp manifest missing hash for snapshot.json") } return nil } // SnapshotHash returns the hashes of the snapshot manifest as specified in the timestamp manifest. func (manifest *Timestamp) SnapshotHash() FileHash { return manifest.Meta[ManifestURLSnapshot] } // VersionedURL looks up url in the snapshot and returns a modified url with the version prefix, and that file's length. func (manifest *Snapshot) VersionedURL(url string) (string, *FileVersion, error) { entry, ok := manifest.Meta[url] if !ok { return "", nil, fmt.Errorf("no entry in snapshot manifest for %s", url) } lastSlash := strings.LastIndex(url, "/") if lastSlash < 0 { return fmt.Sprintf("%v.%s", entry.Version, url), &entry, nil } return fmt.Sprintf("%s/%v.%s", url[:lastSlash], entry.Version, url[lastSlash+1:]), &entry, nil } // ReadComponentManifest reads a component manifest from input and validates it. func ReadComponentManifest(input io.Reader, com *Component, item *ComponentItem, keys *KeyStore) (*Manifest, error) { decoder := json.NewDecoder(input) // For prevent the signatures verify failed from specification changing // we use JSON raw message decode the signed part first. 
rawM := RawManifest{} if err := decoder.Decode(&rawM); err != nil { return nil, errors.Trace(err) } if err := json.Unmarshal(rawM.Signed, com); err != nil { return nil, errors.Trace(err) } err := keys.verifySignature(rawM.Signed, item.Owner, rawM.Signatures, com.Filename()) if err != nil { return nil, errors.Trace(err) } m := &Manifest{ Signatures: rawM.Signatures, Signed: com, } err = m.Signed.Base().isValid(m.Signed.Filename()) if err != nil { return nil, errors.Trace(err) } return m, m.Signed.isValid() } // ReadNoVerify will read role from input and will not do any validation or verification. It is very dangerous to use // this function and it should only be used to read trusted data from local storage. func ReadNoVerify(input io.Reader, role ValidManifest) (*Manifest, error) { decoder := json.NewDecoder(input) var m Manifest m.Signed = role return &m, decoder.Decode(&m) } // ReadManifest reads a manifest from input and validates it, the result is stored in role, which must be a pointer type. func ReadManifest(input io.Reader, role ValidManifest, keys *KeyStore) (*Manifest, error) { if role.Base().Ty == ManifestTypeComponent { return nil, errors.New("trying to read component manifest as non-component manifest") } decoder := json.NewDecoder(input) // To prevent signatures verification failure from specification changing // we use JSON raw message decode the signed part first. 
rawM := RawManifest{} if err := decoder.Decode(&rawM); err != nil { return nil, errors.Trace(err) } if err := cjson.Unmarshal(rawM.Signed, role); err != nil { return nil, errors.Trace(err) } err := keys.verifySignature(rawM.Signed, role.Base().Ty, rawM.Signatures, role.Base().Filename()) if err != nil { return nil, errors.Trace(err) } m := &Manifest{ Signatures: rawM.Signatures, Signed: role, } if role.Base().Ty == ManifestTypeRoot { newRoot := role.(*Root) threshold := newRoot.Roles[ManifestTypeRoot].Threshold err = keys.transitionRoot(rawM.Signed, threshold, newRoot.Expires, m.Signatures, newRoot.Roles[ManifestTypeRoot].Keys) if err != nil { return nil, errors.Trace(err) } } err = m.Signed.Base().isValid(m.Signed.Filename()) if err != nil { return nil, errors.Trace(err) } return m, m.Signed.isValid() } // RenewManifest resets and extends the expire time of manifest func RenewManifest(m ValidManifest, startTime time.Time, extend ...time.Duration) { // manifest with 0 version means it's unversioned if m.Base().Version > 0 { m.Base().Version++ } // only update expire field when it's older than target expire time duration := ManifestsConfig[m.Base().Ty].Expire if len(extend) > 0 { duration = extend[0] } targetExpire := startTime.Add(duration) currentExpire, err := time.Parse(time.RFC3339, m.Base().Expires) if err != nil { m.Base().Expires = targetExpire.Format(time.RFC3339) return } if currentExpire.Before(targetExpire) { m.Base().Expires = targetExpire.Format(time.RFC3339) } } // loadKeys stores all keys declared in manifest into ks. 
func loadKeys(manifest ValidManifest, ks *KeyStore) error { switch manifest.Base().Ty { case ManifestTypeRoot: root := manifest.(*Root) for name, role := range root.Roles { if err := ks.AddKeys(name, role.Threshold, manifest.Base().Expires, role.Keys); err != nil { return errors.Trace(err) } } case ManifestTypeIndex: index := manifest.(*Index) for name, owner := range index.Owners { if err := ks.AddKeys(name, uint(owner.Threshold), manifest.Base().Expires, owner.Keys); err != nil { return errors.Trace(err) } } } return nil } tiup-1.16.3/pkg/repository/v1manifest/manifest_test.go000066400000000000000000000040511505422223000230270ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package v1manifest import ( "testing" "github.com/stretchr/testify/assert" ) // TODO test that invalid manifests trigger errors // TODO test SignAndWrite func TestVersionItem(t *testing.T) { manifest := &Component{ Platforms: map[string]map[string]VersionItem{ "linux/amd64": { "v1.0.0": {Entry: "test"}, "v1.1.1": {Entry: "test", Yanked: true}, }, "any/any": { "v1.0.0": {Entry: "test"}, }, // If hit this, the result of VersionItem will be nil since we don't have an entry "darwin/any": { "v1.0.0": {}, }, "any/arm64": { "v1.0.0": {}, }, }, } assert.NotNil(t, manifest.VersionItem("linux/amd64", "v1.0.0", false)) assert.Nil(t, manifest.VersionItem("linux/amd64", "v1.1.1", false)) assert.NotNil(t, manifest.VersionItem("linux/amd64", "v1.1.1", true)) assert.NotNil(t, manifest.VersionItem("windows/386", "v1.0.0", false)) assert.NotNil(t, manifest.VersionItem("any/any", "v1.0.0", false)) assert.Nil(t, manifest.VersionItem("darwin/any", "v1.0.0", false)) assert.Nil(t, manifest.VersionItem("any/arm64", "v1.0.0", false)) manifest = &Component{ Platforms: map[string]map[string]VersionItem{ "linux/amd64": { "v1.0.0": {Entry: "test"}, }, }, } assert.NotNil(t, manifest.VersionItem("linux/amd64", "v1.0.0", false)) assert.Nil(t, manifest.VersionItem("windows/386", "v1.0.0", false)) assert.Nil(t, manifest.VersionItem("any/any", "v1.0.0", false)) assert.Nil(t, manifest.VersionItem("darwin/any", "v1.0.0", false)) assert.Nil(t, manifest.VersionItem("any/arm64", "v1.0.0", false)) } tiup-1.16.3/pkg/repository/v1manifest/repo.go000066400000000000000000000316641505422223000211410ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package v1manifest import ( "crypto/sha256" "crypto/sha512" "encoding/hex" "encoding/json" stderrors "errors" "fmt" "io" "os" "path" "path/filepath" "time" cjson "github.com/gibson042/canonicaljson-go" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/crypto" "github.com/pingcap/tiup/pkg/set" "github.com/pingcap/tiup/pkg/utils" ) // ErrorInsufficientKeys indicates that the key number is less than threshold var ErrorInsufficientKeys = stderrors.New("not enough keys supplied") // Init creates and initializes an empty reposityro func Init(dst, keyDir string, initTime time.Time) (err error) { // initial manifests manifests := make(map[string]ValidManifest) signedManifests := make(map[string]*Manifest) // TODO: bootstrap a server instead of generating key keys := map[string][]*KeyInfo{} for _, ty := range []string{ManifestTypeRoot, ManifestTypeIndex, ManifestTypeSnapshot, ManifestTypeTimestamp} { if err := GenAndSaveKeys(keys, ty, int(ManifestsConfig[ty].Threshold), keyDir); err != nil { return err } } // init the root manifest manifests[ManifestTypeRoot] = NewRoot(initTime) // init index manifests[ManifestTypeIndex] = NewIndex(initTime) // init snapshot manifests[ManifestTypeSnapshot] = NewSnapshot(initTime) // init timestamp manifests[ManifestTypeTimestamp] = NewTimestamp(initTime) // root and snapshot has meta of each other inside themselves, but it's ok here // as we are still during the init process, not version bump needed for ty, val := range ManifestsConfig { if val.Filename == "" { // skip unsupported ManifestsConfig such as component continue } if m, ok := manifests[ty]; ok { if err := 
manifests[ManifestTypeRoot].(*Root).SetRole(m, keys[ty]...); err != nil { return err } continue } // FIXME: log a warning about manifest not found instead of returning error return fmt.Errorf("manifest '%s' not initialized porperly", ty) } if signedManifests[ManifestTypeRoot], err = SignManifest(manifests[ManifestTypeRoot], keys[ManifestTypeRoot]...); err != nil { return err } if signedManifests[ManifestTypeIndex], err = SignManifest(manifests[ManifestTypeIndex], keys[ManifestTypeIndex]...); err != nil { return err } if _, err = manifests[ManifestTypeSnapshot].(*Snapshot).SetVersions(signedManifests); err != nil { return err } if signedManifests[ManifestTypeSnapshot], err = SignManifest(manifests[ManifestTypeSnapshot], keys[ManifestTypeSnapshot]...); err != nil { return err } if _, err = manifests[ManifestTypeTimestamp].(*Timestamp).SetSnapshot(signedManifests[ManifestTypeSnapshot]); err != nil { return err } if signedManifests[ManifestTypeTimestamp], err = SignManifest(manifests[ManifestTypeTimestamp], keys[ManifestTypeTimestamp]...); err != nil { return err } return BatchSaveManifests(dst, signedManifests) } // SaveKeyInfo saves a KeyInfo object to a JSON file func SaveKeyInfo(key *KeyInfo, ty, dir string) (string, error) { id, err := key.ID() if err != nil { return "", err } if dir == "" { dir, err = os.Getwd() if err != nil { return "", err } } if utils.IsNotExist(dir) { if err := utils.MkdirAll(dir, 0755); err != nil { return "", errors.Annotate(err, "create key directory") } } pubPath := path.Join(dir, fmt.Sprintf("%s-%s.json", id[:ShortKeyIDLength], ty)) f, err := os.Create(pubPath) if err != nil { return pubPath, err } defer f.Close() if _, found := key.Value["private"]; found { err = f.Chmod(0600) if err != nil { return pubPath, err } } return pubPath, json.NewEncoder(f).Encode(key) } // GenAndSaveKeys generate private keys to keys param and save key file to dir func GenAndSaveKeys(keys map[string][]*KeyInfo, ty string, num int, dir string) error { for 
range num { k, err := GenKeyInfo() if err != nil { return err } keys[ty] = append(keys[ty], k) if _, err := SaveKeyInfo(k, ty, dir); err != nil { return err } } return nil } // SignManifestData add signatures to a manifest data func SignManifestData(data []byte, ki *KeyInfo) ([]byte, error) { m := RawManifest{} if err := json.Unmarshal(data, &m); err != nil { return nil, errors.Annotate(err, "unmarshal manifest") } var signed any if err := json.Unmarshal(m.Signed, &signed); err != nil { return nil, errors.Annotate(err, "unmarshal manifest.signed") } payload, err := cjson.Marshal(signed) if err != nil { return nil, errors.Annotate(err, "marshal manifest.signed") } id, err := ki.ID() if err != nil { return nil, err } sig, err := ki.Signature(payload) if err != nil { return nil, err } for _, s := range m.Signatures { if s.KeyID == id { s.Sig = sig return nil, errors.New("this manifest file has already been signed by specified key") } } m.Signatures = append(m.Signatures, Signature{ KeyID: id, Sig: sig, }) content, err := cjson.Marshal(m) if err != nil { return nil, errors.Annotate(err, "marshal signed manifest") } return content, nil } // NewRoot creates a Root object func NewRoot(initTime time.Time) *Root { return &Root{ SignedBase: SignedBase{ Ty: ManifestTypeRoot, SpecVersion: CurrentSpecVersion, Expires: initTime.Add(ManifestsConfig[ManifestTypeRoot].Expire).Format(time.RFC3339), Version: 1, // initial repo starts with version 1 }, Roles: make(map[string]*Role), } } // NewIndex creates a Index object func NewIndex(initTime time.Time) *Index { return &Index{ SignedBase: SignedBase{ Ty: ManifestTypeIndex, SpecVersion: CurrentSpecVersion, Expires: initTime.Add(ManifestsConfig[ManifestTypeIndex].Expire).Format(time.RFC3339), Version: 1, }, Owners: make(map[string]Owner), Components: make(map[string]ComponentItem), DefaultComponents: make([]string, 0), } } // NewSnapshot creates a Snapshot object. 
func NewSnapshot(initTime time.Time) *Snapshot { return &Snapshot{ SignedBase: SignedBase{ Ty: ManifestTypeSnapshot, SpecVersion: CurrentSpecVersion, Expires: initTime.Add(ManifestsConfig[ManifestTypeSnapshot].Expire).Format(time.RFC3339), Version: 0, // not versioned }, Meta: make(map[string]FileVersion), } } // NewTimestamp creates a Timestamp object func NewTimestamp(initTime time.Time) *Timestamp { return &Timestamp{ SignedBase: SignedBase{ Ty: ManifestTypeTimestamp, SpecVersion: CurrentSpecVersion, Expires: initTime.Add(ManifestsConfig[ManifestTypeTimestamp].Expire).Format(time.RFC3339), Version: 1, }, } } // NewComponent creates a Component object func NewComponent(id, desc string, initTime time.Time) *Component { return &Component{ SignedBase: SignedBase{ Ty: ManifestTypeComponent, SpecVersion: CurrentSpecVersion, Expires: initTime.Add(ManifestsConfig[ManifestTypeComponent].Expire).Format(time.RFC3339), Version: 1, }, ID: id, Description: desc, Platforms: make(map[string]map[string]VersionItem), } } // SetVersions sets file versions to the snapshot func (manifest *Snapshot) SetVersions(manifestList map[string]*Manifest) (*Snapshot, error) { if manifest.Meta == nil { manifest.Meta = make(map[string]FileVersion) } for _, m := range manifestList { bytes, err := cjson.Marshal(m) if err != nil { return nil, err } manifest.Meta["/"+m.Signed.Filename()] = FileVersion{ Version: m.Signed.Base().Version, Length: uint(len(bytes)), } } return manifest, nil } // SetSnapshot hashes a snapshot manifest and update the timestamp manifest func (manifest *Timestamp) SetSnapshot(s *Manifest) (*Timestamp, error) { bytes, err := cjson.Marshal(s) if err != nil { return manifest, err } hash256 := sha256.Sum256(bytes) hash512 := sha512.Sum512(bytes) if manifest.Meta == nil { manifest.Meta = make(map[string]FileHash) } manifest.Meta[fmt.Sprintf("/%s", s.Signed.Base().Filename())] = FileHash{ Hashes: map[string]string{ SHA256: hex.EncodeToString(hash256[:]), SHA512: 
hex.EncodeToString(hash512[:]), }, Length: uint(len(bytes)), } return manifest, nil } // SetRole populates role list in the root manifest func (manifest *Root) SetRole(m ValidManifest, keys ...*KeyInfo) error { if manifest.Roles == nil { manifest.Roles = make(map[string]*Role) } manifest.Roles[m.Base().Ty] = &Role{ URL: fmt.Sprintf("/%s", m.Filename()), Threshold: ManifestsConfig[m.Base().Ty].Threshold, Keys: make(map[string]*KeyInfo), } if uint(len(keys)) < manifest.Roles[m.Base().Ty].Threshold { return ErrorInsufficientKeys } for _, k := range keys { id, err := k.ID() if err != nil { return err } pub, err := k.Public() if err != nil { return err } manifest.Roles[m.Base().Ty].Keys[id] = pub } return nil } // AddKey adds a public key info to a role of Root func (manifest *Root) AddKey(roleName string, key *KeyInfo) error { newID, err := key.ID() if err != nil { return err } role, found := manifest.Roles[roleName] if !found { return errors.Errorf("role '%s' not found in root manifest", roleName) } for _, k := range role.Keys { id, err := k.ID() if err != nil { return err } if newID == id { return nil // skip exist } } role.Keys[newID] = key return nil } // FreshKeyInfo generates a new key pair and wraps it in a KeyInfo. The returned string is the key id. 
func FreshKeyInfo() (*KeyInfo, string, crypto.PrivKey, error) { priv, err := crypto.NewKeyPair(crypto.KeyTypeRSA, crypto.KeySchemeRSASSAPSSSHA256) if err != nil { return nil, "", nil, err } pubBytes, err := priv.Public().Serialize() if err != nil { return nil, "", nil, err } info := KeyInfo{ Type: "rsa", Value: map[string]string{"public": string(pubBytes)}, Scheme: "rsassa-pss-sha256", } serInfo, err := cjson.Marshal(&info) if err != nil { return nil, "", nil, err } hash := sha256.Sum256(serInfo) return &info, fmt.Sprintf("%x", hash), priv, nil } // ReadManifestDir reads manifests from a dir func ReadManifestDir(dir string, roles ...string) (map[string]ValidManifest, error) { manifests := make(map[string]ValidManifest) roleSet := set.NewStringSet(roles...) for ty, val := range ManifestsConfig { if len(roles) > 0 && !roleSet.Exist(ty) { continue // skip unlisted } if val.Filename == "" { continue } reader, err := os.Open(filepath.Join(dir, val.Filename)) if err != nil { return nil, err } defer reader.Close() var role ValidManifest m, err := ReadManifest(reader, role, nil) if err != nil { return nil, err } manifests[ty] = m.Signed } return manifests, nil } // SignManifest signs a manifest with given private key func SignManifest(role ValidManifest, keys ...*KeyInfo) (*Manifest, error) { payload, err := cjson.Marshal(role) if err != nil { return nil, err } signs := []Signature{} for _, k := range keys { id, err := k.ID() if err != nil { return nil, errors.Trace(err) } sign, err := k.Signature(payload) if err != nil { return nil, errors.Trace(err) } signs = append(signs, Signature{ KeyID: id, Sig: sign, }) } return &Manifest{ Signatures: signs, Signed: role, }, nil } // WriteManifestFile writes a Manifest object to file in JSON format func WriteManifestFile(fname string, m *Manifest) error { writer, err := os.OpenFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644) if err != nil { return err } defer writer.Close() return WriteManifest(writer, m) } // WriteManifest 
writes a Manifest object to writer in JSON format func WriteManifest(out io.Writer, m *Manifest) error { bytes, err := cjson.Marshal(m) if err != nil { return errors.Trace(err) } _, err = out.Write(bytes) return err } // SignAndWrite creates a manifest and writes it to out. func SignAndWrite(out io.Writer, role ValidManifest, keys ...*KeyInfo) error { manifest, err := SignManifest(role, keys...) if err != nil { return errors.Trace(err) } return WriteManifest(out, manifest) } // BatchSaveManifests write a series of manifests to disk // Manifest in the manifestList map should already be signed, they are not checked // for signature again. func BatchSaveManifests(dst string, manifestList map[string]*Manifest) error { for ty, m := range manifestList { filename := m.Signed.Filename() if ty == ManifestTypeIndex { filename = fmt.Sprintf("%d.%s", m.Signed.Base().Version, m.Signed.Filename()) } writer, err := os.OpenFile(filepath.Join(dst, filename), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644) if err != nil { return err } defer writer.Close() if err = WriteManifest(writer, m); err != nil { return err } } return nil } 
tiup-1.16.3/pkg/repository/v1manifest/testdata/000077500000000000000000000000001505422223000214445ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/000077500000000000000000000000001505422223000232745ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/bin/000077500000000000000000000000001505422223000240445ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/bin/root.json000066400000000000000000000161531505422223000257300ustar00rootroot00000000000000{"signatures":[{"keyid":"18007f60ccfdf9fa5bc79b09464880c054ce34289e97ad93696b85cc43aed314","sig":"qBPdmrhrBW9FPX5urZ973C8nLmwFEOstUBUeVKZIOws81kgpcxQGOQep8iYOKA01O16c/EjI0KwVcjp641t25y1I2AlQ1Wqz2svS/LhTTRs8z8csUWlnxK2gOykjmkaWS3MVUNTlewpBUq/Jhqo5+XLm0Df/VKZwr4wUH0UuJ67QdaY886MXRgRC5JFEYF0VD6z67oa+V8zK3sfjan7FOtppD/anWk4nmd31mnYq3vQsiXgodvMgfo2AGQ1u73Ztunf2jKiInrAJWifBJ9CHvVHU6Kv9mcCOSf4orpPFndw2vmH/hyF/a1RZIg9J/JRzVR03uRp9AAI1+bYSKTuZvw=="},{"keyid":"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a","sig":"SuF1pM8SvDw+t4BeLgSbojJo5Bb+/3hGjQ9RRmnJEAf7BvyjifaU6kz57a+DhabbCyzHJQJYwfA2AzHUYGGRLC2xyXL0SY41J6qqXQoqYTuyqeHGZ2dpGH7lwyzf9EWyY0Z9O/BN0+WGuh7UAtY6uauNAJt6TryKwLpF+AiEZWFcVnDC76uwmEJBJkxAzSL0l28ansmJm6D0k/UbFp0KmcOFN8UJwtx6OrjPd0tPK/3+67fkOqefZPA1OrjfV82t6NEERc/hu4NIJL7JX/PKSYmStLnQmb6lJ+pwlvkDfsYBAQNn7kHmtyc2XAV7NrcccG6Z1c71x+ChUJce6e4CuA=="},{"keyid":"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002","sig":"PrWmZLR7RX5gaUzEg/X93YWvylEVTP/oMhr1mEiVwqU1lf1FPFK+H1QNsYOFpekj1/90KEvBOxl1aDyfDIIGPqLYA1Hmo08Lt5Lqjw9Jv4urXCSZakuQeLFqXGXrXHHBz+tjKQ5W/1XXzRnxjjvkPnsNkj7mxy7j9HgJ4SDiQXCnWtuBOLKi5AOnW3B0zee3kz6tY3b4iKezT47qO2AGa097sqNpdNAG4QIj9bCLZqYGNYl/Xk2EYuRiH4bzuchQLSel9b20kF8dZ8JiWgfrMIkxBA690nD+g8LfRPPxIxCeW1t2ne9YMFnzvjFdSTmrleous7bHxGSKD+Btmdk1Tg=="},{"keyid":"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae","sig":"FKxj84RY10ikxmTzG7VSq2mBCKzpEBgp5Igf9qWla1jeGpPhT51+6Amfxp8M
6WbQZBzJG8PiSKetGCXID9Zn/X4Cxg7/TOoH/h4R1KwWQik9rVXPuFyiwsz+aLyrG0ZYU1RTQWmVp/i4eTQZaK7DIH3tmHLWrMj8WuL+dU9RlQW2xRyHV265l3Tpg+eC2E9Ucgxy4JtfoNiGZgnWL7prFFP3Ktu3+wpjfIVfDMPg7u0dqX+ROzv7OAMF2N/ZEW2XmT3tNSuaw77s1JmDudqrTlg4k5foPJyflgLQgAssT0sg50QonYP7O9BlPWRjn+1HRjmrfDGIkZZOUhunDp7jNg=="},{"keyid":"ef644d810a1f1dcce7078ae5b2821cba346a2eac0a371e56feea9e07a5eade37","sig":"QejezQQxHP0VHizh5nMtgxrahCnQgApmwpWs8mIxp1NnnBr5RIVdF9eUykafu6/OXGY8Vy8qEJvfxj8N1jw+rvbC9Ghgcej0Ijj9RQAnyTqQ5PeOzmg4h8ZdkH4J2YufwMWYcI/+qlM6qQusuyahAa5xKkxYCQWSb+Qw0Btd066VwajIUmjI8unUIwtm8miGJbA2GjUTgSnCazw3vavxjy3FR1ce1KpzdigztkWtuABQS8xJ7/ttJPAKokyq2h4RFV9hrvDKZNDfxIBp+1M7f1VqAthaact3nHfcsNoXQxxrDszhAOlL7b22riloD5Z7lNg0jeaDk2l6NyX5J1XfUQ=="}],"signed":{"_type":"root","expires":"2026-07-26T11:18:30+08:00","roles":{"index":{"keys":{"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn5kVA7MlBfSe7EBaExjl\nKbwoDkn1aYi74s29mFgtRo8nejbrVvZQMCIUhvKc0pFa/l9JD/QY6/nAOCE1lpzi\nwwNkSntfOo3p3HQIR+Ut7hZ4Sxfe/5JagGo3LQ+Hd3EJWUxyEfQ/Bff07F3XAbqM\n5+cKNrdsKWZJcPiJDW621qGwCx52f+gzl9bnFe4/hx34OUgirwqh5DS+LhIO+/yt\nbOiN1AyjQKlnb8lUnblElS4Njd+F4io5VzSrZYi2+4AbTkO6wLwbsWHMzXfv9qwn\nvllufOHpB6EwiQ/xBOMuvJJymHnZvs8AH4SuydQIXLaJuv1ysFaBs0KB/ktbakSK\nLwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/index.json"},"root":{"keys":{"18007f60ccfdf9fa5bc79b09464880c054ce34289e97ad93696b85cc43aed314":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4DYlVfoIQTlyJij0ynjh\njqUkayqXX5c9VXw1Ud3mWCOdThy6V0bmsohgSBeHrfVroSCfsAc5VCUlaSteZeFl\nQEZxpRWDCmSYGslOQZqe2cJi5aqyQOYeU7JLjlfAausLCR9636SfEvQoaCEuGsUI\n67yCVWW2oQ756egUNmOrOSd7Qh6IGuuj9FQb9vExPXTxQw7j95ENOsc1V2lAXCEG\nS1+Nh4NIKdpLOXAohbcpq/HLjddmEAj2GXHo+asITlHCVUQvf574Vh5yLkFWnqj0\nviyRq0jJa9P+qA2oy80a3dk3FBCPu0sov6GfUIC+NtkDfjOkKfluBF9WapqR9wt0\noQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyDwCfCl30vhyJW7fB1bs\npRYKtBKzl7o0qnJTm+IksjQ8RXxj8osUpMLmSvOzCaJ5Wxe+Pm1LpSTDbbubbgvd\nnmEFL6228sifviNIu2HlIl+agfzmXuJ9OBlzGUaI4gAd1Z6pF6+mjlcjz2PbWF84\nAbXZdK49uluqulp7HrGB/qNjGcIRUCHgDU4nnq0OkI1BZZSKm9ovonqDkIK76x/S\niAD9OjKsjQ/s57tE+5WTVObKpfrfK0JeHdpAUsA/2n4L1Z6FmZD4LZWqb0i+C7xj\nMElC99KtjlwRntcjeVWG9YjU8AcEN0n1gON9S2oRdyyAzDTgGb7WueDnn6qstt5w\nSQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOgQkwLOh31QV9OpbO9v\n6o83durJFGPOnVXZiab83pKaSk7HEK9WzXBq0BaPvtFwSfROVdpgtopri5lZi+uH\naMKLUn5F8XRnSMl/7m5vM4XpZZYa4aQId4TWdbFtTu31eHGZ3eEC5nDRJ5NhZOJd\nKLFBu/xmxrh/eNZt4QbdWLZayjHnzyoy5AnfNTR6nJgPAv+rBOqyqT/r14q4Pngh\n3z0I3pNFr5qmxsp013XV+kgOW1F7zT7IMU8xRIgo85UWUNhax0/bjY/2NI1Z+WjR\nyhZmUBMVYWvfw97xDUrvBvrJxZPgg0lGvxJC6LF2dM7wgLaNx9khT6HMBVxjxLMs\nDQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"ef644d810a1f1dcce7078ae5b2821cba346a2eac0a371e56feea9e07a5eade37":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqsL5sV9dhPqnkc3dU2xH\nVRPVuH1bebET64bJya96IXjR3Um/IbIikmIpAL8KbY35h44hR4nNwUQZcQggo854\n5SxDi5LiAkMqdr9uq5mXp7sZXb0HcuHX97BqTUvTvr+t05KaON81ikdVGyRw+Qus\nFFXZO2Pj0w0I4QD87nISAuK0wQJhD8robDzO+Qf2K5cHXjEu5DGNc+wq66pJWCwt\nDl2BAvkF86Y3kZVuEQ6zp5PPQh0l++0PtzY/NNNHiLm7JUSlmpXyis7f+FaCEGl0\n4JWs5ImJg1XjUo2AsSnlFZ3adrPJ4NHFo64ui0/JsEAhn1TBWLL4AhT9kVIBMXI4\n0wIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":3,"url":"/root.json"},"snapshot":{"keys":{"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqTZx29eJR5EumjqM4YTb\nFlKbim1GNYmtbCLH51BbU2lt46ddmfGvtGsxTD3mIZ/GEHVFv6Aei3xx5nIfhGP0\nrG78JRz394uU8Pd62DiIFWYizr5o+ZBZu29D2YK5ZtxoLFpgt0ibnINK2NcesDC8\nSqfIUbMiQFT6yB/MYD275SjfRGHOeYTPmKdjMJrhLL2cfIPYnQ0QFYIyMvXBG1Fj\nU0rc9UclYQHh9YheIDVYI9YCo/DWP3KFfRJpoTjQRGoPSK9TXcpCAEzQpEG3jOek\n9PdV9Ol6/O8JbrFwXWF3LhkUThg+zCjV4qHtP4oqp5QCqzTQTXGQ9qxWUSlHi4Eu\nIwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/snapshot.json"},"timestamp":{"keys":{"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzTgV5iKhMnunUDxt4PB\npYqTMPaJN/ZdOOsP6cS3DeCE/EcYGfgCjvP7KD3gjG98VDBTVcuwZClSy+/zvHhV\nIq7VWu+yxQL5c6oa1xpCyHoA96JiLIDPhmqEdscdRybcRQ2CYywzKA8jSwEQCnEK\nc8a74ceY352l/MEcOem0+AtKrOjqcjbXCayDwC9yTg/c78bkp+4T8AhSWgt6Tlrt\nY8jLE7zwojFtIYtMwobWRIW2O3nJDXiSBbTPG3M9kF1G43INshSdBcuq5Tmy8lpE\n/XiG/E7+hP63Hm+KAcdvl553Zs7pLhAZxV0kqlApqRRwhscw+JQci8sVONun5t9t\nNwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/timestamp.json"}},"spec_version":"0.1.0","version":6}}tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/manifests/000077500000000000000000000000001505422223000252655ustar00rootroot00000000000000tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/manifests/index.json000066400000000000000000000024771505422223000273010ustar00rootroot00000000000000{"signatures":[{"keyid":"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61","sig":"auLOcy4p1L2Z7OFCY6i1gshoB0WbDACSd2OZdmpu+Lx78fevZhn3rIljvP2pGl81wm7S3XcXVN2MMSHe0LVtjJSSGhkM1B1wEAgZdO60WpEF41h12en+pke/YzsWksHtNdyGtlyjexamz6YDbIyBEiif/6JyVXZyfoDteGgw4OKFJqzmacTQ0FkWnYVFH6beSiKFuNOD4QoXOAq6KS2wt+pz/Kl443g+TyBRqwESBELwa7VOdKXX9VtDuqqBUjyZ/7xBxpv+2UdkCgs/Khf+91atJSq3lIDEztJ5WFn7qcERNndZaMuISo8uMXD9xi41q5bcQoWGPcj8ANHfkYXTxQ=="}],"signed":{"_type":"index","components":{"tidb":{"hidden":false,"owner":"pingcap","standalone":false,"url":"/tidb.json","yanked":false}},"default_components":[],"expires":"2021-06-23T12:05:15+08:00","owners":{"pingcap":{"keys":{"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"name":"PingCAP","threshold":1}},"spec_version":"0.1.0","version":420}} tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/manifests/snapshot.json000066400000000000000000000033401505422223000300170ustar00rootroot00000000000000{"signatures":[{"keyid":"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9","sig":"dOXUVd4ibr7FKvHuJzgNZ3xHEhVnoB58i0R85X3z8zfAMxZie+zCQ3yOU2VND0xdaoMq1/nyPABKOcS555dkvYyVv2ik2c2y7Q/H83y7+zhrbkkSKWSpQb1Rdu4CF8JS6oHjjnD90qAj5ok5rGmjMVVshF+ICKg1TPY8H2vdzv6J71mjOuuWZd10Ya7ZDlgJ67rQnZ9zrr4OHEfHz7eop8iFd2MGJq/K/M2FpDgYxw6p8/CNtMtSblNSrTzbYAXh1sBw6E+oQ97jaUfIRegI5dLg18JZMms02Cqt6qGb++TcmW9zZbydNuTdFVIlP8z8nEma0/ab10zT1n6/jlmGKA=="}],"signed":{"_type":"snapshot","expires":"2120-08-01T14:47:48+08:00","meta":{"/alertmanager.json":{"length":1511,"version":4},"/bench.json":{"length":6942,"version":13},"/blackbox_exporter.json":{"length":1559,"version":4},"/br.json":{"length":1834,"version":3},"/cdc.json":{"length":7327,"version":47},"/client.json":{"length":4558,"version":1},"/cluster.json":{"length":32845,"version":31},"/ctl.json":{"length":13887,"version":55},"/doc.json":{"length":2952,"version":1},"/drainer.json":{"length":30412,"version":54},"/grafana.json":{"length":37008,"version":59},"/index.json":{"length":3750,"version":420},"/insight.json":{"length":6302,"version":5},"/mirrors.json":{"length":2262,"version":2},"/node_exporter.json":{"length":1523,"version":4},"/package.json":{"length":3778,"version":1},"/pd.json":{"length":34609,"version":61},"/playground.jso
n":{"length":16371,"version":27},"/prometheus.json":{"length":37550,"version":58},"/pump.json":{"length":29950,"version":58},"/pushgateway.json":{"length":1511,"version":4},"/root.json":{"length":7275,"version":1},"/server.json":{"length":1453,"version":2},"/tidb.json":{"length":34947,"version":62},"/tiflash.json":{"length":8300,"version":33},"/tikv.json":{"length":34976,"version":61},"/tiup.json":{"length":9732,"version":29}},"spec_version":"0.1.0","version":0}}tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/manifests/tidb.json000066400000000000000000001042031505422223000271020ustar00rootroot00000000000000{"signatures":[{"keyid":"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae","sig":"VGDCAfnD5rOXnz6GwCgDEWjmzvlczcwM2B2BrXdJmCT135UqN4EsalCSIJZaY8cjklHcM01qppaa8SGfHc5bviT4r1qygk06azb2lYq1Tu73npCYwXqEa2NEhamQEP6RQeGfNeDFrwTV+qTfcLAPnAKipg4drQxJLkMaBEp5zIsUYoYGjfY9CMmfTb+CdVlmKetY0qrgCTKNAljegHk7SRLli7V82sb6L0DEZSL0ERE2nf2kWqYdHqSzixxlbcbDzLw0gX6YdFs7DFBwzuSZNOSpuXUneVH5iCQpB2pwNFrBeWdsTkgnmNcnTvWGOlCFSkdUaHVYnlgdMjDNR0eMWQ=="}],"signed":{"_type":"component","description":"TiDB is an open source distributed HTAP database compatible with the MySQL 
protocol","expires":"2021-07-01T20:01:44+08:00","id":"tidb","nightly":"v4.0.0-beta.2-nightly-20200629","platforms":{"darwin/amd64":{"v3.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9ef93384b98a9c695d1be15e58ffec4dc64ccd0ba2a6f3e5b80a44e0ee55de03","sha512":"9b7e2e07548e24e796332344a6137c4d839e9e75a42bfa4881ee4033558b7a9e3315b3f066472b80b12ef08d6ba8433d1509e38111abb29362bbbe2925206223"},"length":26234205,"released":"2020-04-16T16:58:06+08:00","url":"/tidb-v3.0-darwin-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"61c3ec4864ec1c5551053c4247efcdd90e035bba809e10c943fe0acc89aea40f","sha512":"4fc6458d878c8c94658de5ec4afa6cdda1fcdac0a42b85d37989e6919b7e13012bcca59954384890f6086ed18de318fe02a3c6cdcd8a4bc816209260cf5905c7"},"length":26234250,"released":"2020-04-16T14:03:31+08:00","url":"/tidb-v3.0.0-darwin-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"165870b0c61c07c68e7b9e14372b75f2f94be4691eb52a6f9237e9ef2edcad4b","sha512":"e63e2d1075f9715b9e40bb4c2eccbb107e5e8e419b05eba72a74c9e48da3afcbef705816bf595db7e4bccd62810d2ca419a1da563352ca4e32d89016a8c0ed10"},"length":26302814,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-darwin-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"01eb225d187647bb6bcf5132ea579b6801569ab933bd5650125294d9828f9661","sha512":"9f5dbffea7cfc306848e51511cb418a8e9ea5e21ad8699157407e9403633ce39ee9899618d26f97eda2713b8aa0be231612f77e9e1ee4e87ed934c8a9a3cd9ad"},"length":27351570,"released":"2020-03-13T14:11:53.774527401+08:00","url":"/tidb-v3.0.10-darwin-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f1fcbdbadc825ada19b7b15920b06444fd57ef2ce4c3bedfdcd10d3799af9f58","sha512":"fa871ad3e05684990365d9c3ca186e2d6f3426d9fd756539181d61ec4e76551fdaa5ddb1146c183e391e22ac41aaf4c7d5d095c1c3f2711a5575bd7c605a7a5f"},"length"
:27399702,"released":"2020-04-17T01:09:20+08:00","url":"/tidb-v3.0.11-darwin-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0ccb355fca4d408c4d75232513ff58632d7bb43278203ce8528fc9e9dd5bbccd","sha512":"7055e820c5e9275c1aa9c7d5622af9f52d1e5be2fad3eab290f08594371016cd0be182232cd537ab3870994c042f78abb1f25541f19202356160cf4977778013"},"length":27399814,"released":"2020-04-17T01:16:04+08:00","url":"/tidb-v3.0.12-darwin-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7a9d935f01d23d6a9393e13f5ca96a8558e529bb6eeb1641fc6402e06c746670","sha512":"d791b4fceb8224a0ab6ba52b6c042493c0824861888fc69189ff0e9576e63adbb9eca6050812cd0c60bfaee43aea40586f58b5059c9f34be5a9ede1abcc9c303"},"length":27399048,"released":"2020-04-26T17:25:01+08:00","url":"/tidb-v3.0.13-darwin-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"2551f0d2e13d5475770fb85257266c784efc22dc5ae7eb50bb3820eddfb87c4b","sha512":"7d0567a8ea11c7435cb93ff3de5e5f1f41457e743b4cf640eca96178c933ed905f9c61a7a419786cd9c5c3e6ad4dc59d5aec639e58d0a750a08c5a34e45d3fe3"},"length":27501728,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-darwin-amd64.tar.gz","yanked":false},"v3.0.15":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bbf2dc352567e14df8562c687f7356aa9b71ca26c64021af4129445b8b872038","sha512":"2122032666fb9b83faa2d2746a396044aba6ddee9dd01faa32d1d1e9e7cdd3b9d271c8609383c53b478b45a0ddb106c6bc528419c020d24977e0a0170d9b900f"},"length":27535110,"released":"2020-06-05T16:57:59+08:00","url":"/tidb-v3.0.15-darwin-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"6e7c4b5c17ed72e258c472a96a8ed6f63b67757551bb052dbceb718bec963a45","sha512":"a55b2522c396bfd7a989ef0f9822ff5f36d83503f43d0d3ac872992075e9df5fc9cdc53f26ca0b1f1b0aa7ddff710dfbfd26dde4399f523711c31f4c66db3cef"},"length":26394385,"released":"2020
-04-16T23:55:11+08:00","url":"/tidb-v3.0.2-darwin-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1058683f1045004dcff6193d9e593a4288d17ac5e4541bf7a438192e371c551a","sha512":"1440fbffcaf7d00d2326cb04dbee2004973e0e3e7fc6ccf27a4b3f99d15f9d6820f1fc8b01ae5b9338ed1f3d0205cc8386ffb13f08eb8df48279d870e1c0ac97"},"length":26459748,"released":"2020-04-17T00:16:31+08:00","url":"/tidb-v3.0.3-darwin-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f9bb4c7489dcc392a6cfa2b75bb761fcb953f16f76a692d2a7fb207eefb25dc0","sha512":"2fb797be00000ed0be0f472ebf7a8f89af18adfe0655cdecfcbbd74e443b052aa89e0df42f3ad9c14b9cf6e0ace95f85d0c59c93195036ed243b10eb81f5bdbb"},"length":26563695,"released":"2020-04-17T00:22:46+08:00","url":"/tidb-v3.0.4-darwin-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"4d1423c05d0c79f0b5d5b2ed5fcb1570e44247ea1e6584a207ab621797a30a5d","sha512":"bb10ae842393dfa8341214a55557d5a40df51b00ccb46b78d49f6762fe06747047774e5d8aca372cd691d08e63318951d821502fce6c16874a0fd1a6d938c32f"},"length":32192126,"released":"2020-04-17T00:29:45+08:00","url":"/tidb-v3.0.5-darwin-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"b0bf190285676dacf4481798a9a62472bdfee3d135e23d5fb79fcd32c64fa587","sha512":"2a802f452a10eefbe4549af6017c0d12cba86126d59492bd460a4f7fd66c1ef1e373c815f6ec013658af303fdea9b114fef3a4790c3a33f3c96061e243dc52e5"},"length":27124055,"released":"2020-04-17T00:39:33+08:00","url":"/tidb-v3.0.6-darwin-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"6c3081592ea9b3ae9f87aa572bc861e9cdf3ac4cf05b9ddd047bf6c34f91fe1d","sha512":"4e0c467b7b160080d881e213486a995d1d077102d9b408c4780bc3703ebbff5362349354ce55f2c1881e071c0daefa4f15a3a213b3d57c3533be8f5ad4d570a5"},"length":27120035,"released":"2020-04-17T00:46:32+08:00","url":"/tidb
-v3.0.7-darwin-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"983355652ac6d97089f7a551c6cd6296bc7b471a840807a9aee132a406c99ffe","sha512":"1f38756b0098f5241335fd6edf5872470c9d559620115de9a1ac8c1ee657555ba8156642fb819b530e617641d11689f7be061a4ca02474f2ed5dfd5bc44c21f9"},"length":27246015,"released":"2020-04-17T00:54:19+08:00","url":"/tidb-v3.0.8-darwin-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"70df998aa933316717e5bca4d2973935284b6ce49d1755c46dec6737676d570b","sha512":"1ac95ea3b902f5708591edd659f710aef0a4d22583986ba3fb41181d7739d39de1f9149d20f9288390917b3dd651c5fb33ad82ea79532dc392c5222130a1c232"},"length":27332410,"released":"2020-04-17T01:00:58+08:00","url":"/tidb-v3.0.9-darwin-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"4b0c4d69c4e2699ccb5ccae89a81c08817cca106d37644ce261b4f7983e356f7","sha512":"86845acf140cffe808f24124e298db1467dfcd98df6abaf1607b4043ca4c37bf1eaa220e928bff76f9381b8d1929fa1a824aa47e35b482b80414024203ca5b13"},"length":28156482,"released":"2020-05-22T15:34:33+08:00","url":"/tidb-v3.1.0-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7154423e6bdf5fc2c689c7fc725bd7c3b9530a5dfb158e54f5da702129910f7e","sha512":"368e779252547dc6405bbea771cfb58b91c5bd14586ebc14967774b615cdf67297fc5b010b55d0aa77b4e111eafc76842e98f01dd7ab403564834926641731c4"},"length":27202791,"released":"2020-05-22T14:35:59+08:00","url":"/tidb-v3.1.0-beta-darwin-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"addf1322c81a79d1a316ae9779e7f143135e703e006f8e1ae7f9d0a32d66cb5e","sha512":"a6b274419dbcb0f9796cee257f29d000786e457466ee819b925012992c2921cbd66570f1705b69cc77f14d03660bc750c66b04472fbdfbfeb45ad4a93cd01f8c"},"length":27422699,"released":"2020-05-22T15:22:30+08:00","url":"/tidb-v3.1.0-beta.1-dar
win-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d5a15fbebd4379fa823c869dc4117d19ff889561bb03b19110c5e84bd0fb26c9","sha512":"e47b4787bfb66e02e9233013d302e72266318e09281140e8dde75f7c085215afa8299fdfa0dc2929360e99f25976da6a9a2ef6e7513e9648234ef9768a28ec45"},"length":28062180,"released":"2020-05-22T15:28:20+08:00","url":"/tidb-v3.1.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5d3d621009148f99d4ceefaa1f75e832d1191eea711e22a5370b360450b5d4a3","sha512":"5d04dedd689f9efdbc6ce776e9161b23ec6d671076cc44e09880b9244fa02c16c0b2489dabc1060c9a577374e72bfae7e786bb53bfaad198ca196ac6227a8c9d"},"length":28184931,"released":"2020-05-22T15:56:23+08:00","url":"/tidb-v3.1.0-rc-darwin-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d9d5739daa91392bae0ce17455a22c86cd1672645373e1eacee6fe46f732003e","sha512":"fdb81f2c2b491b669830dd358b219520f84ffd07e7a90c2fd1448573f8f540fdb700b302ddeffcae3553f4276345e4dec88b2106fc0f25aada30f4a8d79b86ea"},"length":28369009,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-darwin-amd64.tar.gz","yanked":false},"v3.1.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"82b6307eb661d843488e6ab5caee638538526c935022d6ed3e3b581c21ad752a","sha512":"97f86e7ded5c2f26e64a4607c7da761165f5dc235cd28ffa3b45b7474868b0cd51b20494aef2b5c4bed9e59e2d32916504989c4a8ee5160829efb99c854d7bfe"},"length":28368564,"released":"2020-06-04T17:57:09+08:00","url":"/tidb-v3.1.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1aeec438560be6e92f2783ee657827299c1ab4c3f9ae02a3cbc893bf963d3955"},"length":40773797,"released":"2020-05-28T20:10:10+08:00","url":"/tidb-v4.0.0-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e0d904aeee1c0ae215dc363d28d043f21b6d8e523526
08e31ab7044b924968ef","sha512":"e1be572e43d53920720b918ee532302165eb9994a2cf46371da7bfe590587988c41d3944add3278e041f7c5947c1fd17aece4a14fe4ead7a3c027ddb6afc9716"},"length":32614395,"released":"2020-05-26T11:18:05+08:00","url":"/tidb-v4.0.0-beta-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"6b9ea806250fe52c7c6db880eee10f83e8e9d0b7c55d281b401eb93527eff1ed","sha512":"b93b373b5eb9b437badab007011fafbbb37c80626be33a46392067352455e8cfeba9faeaaa11d9266502e6bd1553463a3b5cc7d053c427eb7528197f7fcfa3ef"},"length":33489884,"released":"2020-05-26T11:42:48+08:00","url":"/tidb-v4.0.0-beta.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"cd6e91ff48be1c7ff849aa2c3cad2092923695a3c52ef782f602bdf355697d70","sha512":"14fff5efbef85a7635354eeb915346da67cbc0b4f4fbc94e77cff0236ce37a0dd201a0a557976d8b887243d54a4b6dfee219d0afe4912adbf9ff083747ad231a"},"length":34318099,"released":"2020-05-26T11:56:51+08:00","url":"/tidb-v4.0.0-beta.2-darwin-amd64.tar.gz","yanked":false},"v4.0.0-beta.2-nightly-20200629":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1ed0fbc0d2de4a0e7eaf1b45175906934ec2d4768f3bc37ae15854967102a636","sha512":"3aa5faa70bd8009de142523e38048d9896bfe830b599f3f477cfecb56818e07623c8efb79441d5ddb450db781bed333d5b7d49d3dbec508aa65dbb0d0ccac104"},"length":44152618,"released":"2020-06-29T05:00:12+08:00","url":"/tidb-v4.0.0-beta.2-nightly-20200629-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f225e7b60f2b7f4a5264825ca4487089fcc98f0508a150e7928eb2e2b981b5df","sha512":"4827cb724c59cdb06327ab20d9f67ee6e99df67596f3fb28101b05dade6f87ade25e5c384a1ba44cc2ca69f55e72b879e014c6219de11d665213d0d3daee1090"},"length":34458489,"released":"2020-05-26T14:56:06+08:00","url":"/tidb-v4.0.0-rc-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes
":{"sha256":"e88ac6fff667c0fbebb8a4a5805d9f174ed57ab172f44a417efef69ebca2b310","sha512":"f535fd63037c1dddec93c9e6c4bca0ed27243babdc06fa80943480abe68843e2af04f9193129345d9102e0604f7438adc12a6b4852b04e5861490a23b9cf8d6b"},"length":34826086,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-darwin-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9c3d54eaf90e700e83fbe5354d980deb14fa4d080b93422b0b399efe8732df1b","sha512":"841f78400e06d77e855c56286081611e5861f8fbd9dee4d8408bd4ac7e4baf45c2c26b04053ab3230c8004ee7d2c08aa263c2df642711a2ba0298ff3900146d3"},"length":40461780,"released":"2020-05-15T21:54:51+08:00","url":"/tidb-v4.0.0-rc.2-darwin-amd64.tar.gz","yanked":false},"v4.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5a024307caaef69694d8ddfc7ab7323222b7a66728365b2679ef151c59f250f8","sha512":"bcb06f628faba5682026f76409ed490d7caf1825769ed2c478ab29830d795aa26e08011db358a1574afac49b7f7a9d3230e5f60bcf2fe9a1d60dcfba4c1a2d16"},"length":40773791,"released":"2020-06-12T21:22:29+08:00","url":"/tidb-v4.0.1-darwin-amd64.tar.gz","yanked":false},"v4.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"503580650f2efa8f48eb2373deac121e5c634a0368562ef7117dc46c86fb0828","sha512":"edccee096d4825bd4065e828af1aa4cc9f83f0823c580d3304870b21900fe3637607028f1233895f73e869beac2ca00f54a17d9c13be60a53f8a93071c614f3f"},"length":41043044,"released":"2020-07-01T20:01:44+08:00","url":"/tidb-v4.0.2-darwin-amd64.tar.gz","yanked":false}},"linux/amd64":{"v3.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c08a9b1211af6f6a2e6b3393e2c9bacb2e5485739da0942022fd9e3b14949fd3","sha512":"948878fcfd7fb616cc4dc84545187492984cbf31d53c41b9c761d37bdb01068efd5779aa89919bfab938d97fcfdeb0f7e405051993c67278636daa9bcf3f4521"},"length":26787208,"released":"2020-04-16T16:58:06+08:00","url":"/tidb-v3.0-linux-amd64.tar.gz","yanked":false},"v3.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha25
6":"91758e462c216bd273c4afabcfeabc06c037d555a6e23f1a06ab58b3b9cd28c3","sha512":"871c714bd1daba19c6a3729649b3c5b9f2eb3d739f7decea759d7efa85a15f9c2565e1e9de20deb286823ce53ad37cb7941be717fa171917c02d847a655deff1"},"length":26191751,"released":"2020-04-16T14:03:31+08:00","url":"/tidb-v3.0.0-linux-amd64.tar.gz","yanked":false},"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"acce252326320f6d54c1ca737a358c5b5c5e1b54a965f30133c4ef8c6df10109","sha512":"2e455a849f5f185397bdf0865397a6b7c169ad0ec285a4360002235e9a777452603c36e89b92ab0d59ad523193b2cc38252ee42ccbd18c5fbe577a452fbb4deb"},"length":25957605,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-linux-amd64.tar.gz","yanked":false},"v3.0.10":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"346ddb4078fadbed6bd59dac98c04f2d4b7134a54da0018c9a16deef7e9016be","sha512":"f0561c7a985c00dfdcc0e0f2743f8a91be206cf1bc9c89929409f2e8b1bf109862fc7274cd0734e76364fd02193296689ad71b7a47b59a3e5d16534b04249fef"},"length":27008746,"released":"2020-03-13T14:11:53.774527401+08:00","url":"/tidb-v3.0.10-linux-amd64.tar.gz","yanked":false},"v3.0.11":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9232886ac827634fec1def3f5575f3cd4d35675a1b18f07d3a35f84b83f3848e","sha512":"3cddae74d3ea50fd90b056bd97f635db789b4d4094e516aed282efc20fcbe4371f4e575c2d8b533ca5a0fe870cdd8b9cd7dad25c24b222220081adb504c5b92e"},"length":27019356,"released":"2020-04-17T01:09:20+08:00","url":"/tidb-v3.0.11-linux-amd64.tar.gz","yanked":false},"v3.0.12":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"70221ca570c31bc6e703169d49bf5e3f1c241a889d3adce47ec7d1f865db5a62","sha512":"8873134f0927e043ea66799e6bed8fa7dd477c9ec2b195e4a89e53957a6d3d8646a191ac286e307fa525dd34f62f7051be433db12108c874ae09c851cffed41f"},"length":27056612,"released":"2020-04-17T01:16:04+08:00","url":"/tidb-v3.0.12-linux-amd64.tar.gz","yanked":false},"v3.0.13":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"367de505050520a530f
2c5e8bd7b4e1b58305e6a4408c3617c9658248956de78","sha512":"0311fe99bea76e6e92cf06354b95a2adc3ecd83f9cd72db8d1eeebc5ba837e52339fbd1f3caad4efd9b24903615478355529b5434995b0ae7c4c13b4eb783140"},"length":27057866,"released":"2020-04-26T17:25:01+08:00","url":"/tidb-v3.0.13-linux-amd64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"9e4a8ed5aa19e0eee6124ee9f86c400687d641454b461df9419fc5005b83fae1","sha512":"08c0ce3b960211d03b4608c6f335fe58d16246cf58db5a14112722ec86a5f9fbb8d4c7178faf2e3c910400cbcfe1890707a10c825b95320ee3605739d8aae873"},"length":27153764,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-linux-amd64.tar.gz","yanked":false},"v3.0.15":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"36bed0b0e8e5f762bf6c43eaf657700c64cdf08e76fc27c9c672f39df022fc41","sha512":"8ed57ce9c1b385968cf7475f61ae0a1a55abb57b6a6f0fd2e22a04812c6cb94e0efac3646ac37a7bb32602f87b703c9660aa5b62cba97a78c01e3b1b7e51cc1d"},"length":27194062,"released":"2020-06-05T16:50:59+08:00","url":"/tidb-v3.0.15-linux-amd64.tar.gz","yanked":false},"v3.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"41c0be9510fb590d688527145fdd3252069234e60059030d1a1695839780d045","sha512":"6517339c3f333e0e14963e66e7675232cd2f6a64b808af5c76279342a39314d5c588ec62dcd71c7cec0edfc726d475fc5c34e30ec45d0c5ff85eec749542a85a"},"length":26356240,"released":"2020-04-16T23:55:11+08:00","url":"/tidb-v3.0.2-linux-amd64.tar.gz","yanked":false},"v3.0.3":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bfcf9fafd39f41767fc90367beae6e7624427c485ffe3dc4e90f44c0e264fad1","sha512":"35b7b61796c504b44541180f98dbc7471cda859790cc6c11f86b9594e390dc060b2e2182f01c4cc55d05968b92d8c43b2793ffb9c1aaac4ca3f3f05c35c8ffab"},"length":26411379,"released":"2020-04-17T00:16:31+08:00","url":"/tidb-v3.0.3-linux-amd64.tar.gz","yanked":false},"v3.0.4":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f5c9a39f083e7384fd1a1b04cec5510c37237d58b9cc99c036a094
639b4e08bc","sha512":"3052398c4a12a6ee933d5ae717e2294b08be6d2bb5a136e8336e082018764860bfba82fb0020549a886f2c4ad42da7ef8785a0a98815414be852ad28c799fcfd"},"length":26761632,"released":"2020-04-17T00:22:46+08:00","url":"/tidb-v3.0.4-linux-amd64.tar.gz","yanked":false},"v3.0.5":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d847f9214241d7676da39d0704c9e5731843d8a596f2ce9f1d9fab8e9823e7f4","sha512":"4fd4ea0cd3404eea2abe6a4267a109c6b4e58939bb4f8efbf9196f068f0393f19c45250bc92bf3dae5dc354bb080956f381cb1a4eed33909bee9aab77cedc80f"},"length":26260883,"released":"2020-04-17T00:29:45+08:00","url":"/tidb-v3.0.5-linux-amd64.tar.gz","yanked":false},"v3.0.6":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"2dd998f99a964f417d2dccc88fd31c8afda59c81e0a4deb0897bdca2aadea998","sha512":"718db462b4f1120f90e25eed3b8db3b31b75e9860c891b7d602c70f86b02159112a4fef24aafde4aacfb0f3e64977c0f969e62d087980b22ae694ae5b6989849"},"length":26778819,"released":"2020-04-17T00:39:33+08:00","url":"/tidb-v3.0.6-linux-amd64.tar.gz","yanked":false},"v3.0.7":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"4c710645f76255bd591dcb465cb17cc61d9905929617ebec44eb98d84cf0d45f","sha512":"1344e43c4e30770bfa399a95d9ffd50fd45cc80ef5fabd89069af47e014804eec6ffc18e1c262d7a24c3a5ad9d06c0b28bf0cee63f46d638fb7d66d6b415bad1"},"length":26787172,"released":"2020-04-17T00:46:32+08:00","url":"/tidb-v3.0.7-linux-amd64.tar.gz","yanked":false},"v3.0.8":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c7002d37bd1b1492445e9d1c7dd3a642f43888750c591f5bb026d7f9c983cc6e","sha512":"b6c9c3c3a5e6e8ac30d6e43376b1f1500de1d06d305a3c33d87676490a6a738c8256c2eedd3f8ce4abf3c193896e27f3700c10a6f2f7082b6d1c3c495b861d33"},"length":26903200,"released":"2020-04-17T00:54:19+08:00","url":"/tidb-v3.0.8-linux-amd64.tar.gz","yanked":false},"v3.0.9":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"d76da75d2122a3aef6805a4c936b24f1e6af1466163501706aab0ae900d260f9","sha512":"79f0f4d0d91a54cbd0
bf2489dd683541f61ab606893a423b9c0911a40a630f20a789e9df5ce73ef2c2cf2554926e63ba5589ae439c6b90619cc29006f2d4edc6"},"length":26997279,"released":"2020-04-17T01:00:58+08:00","url":"/tidb-v3.0.9-linux-amd64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bf7d58ee1565daa4d24370e32adeca87edded2f497ab277c137fa3e0652b4101","sha512":"f75dbf6ad349ae96bd157fdc6c0932627c230eeeacffe0091e123f3453136423cdc41968717c0263c562b0491dcfe0b5e9989fe3e35e0cc751964f250e27d95c"},"length":27796291,"released":"2020-05-22T15:34:33+08:00","url":"/tidb-v3.1.0-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c4f7414be060716ba0b841e0fb0075faa1fa15d99db246a5500a527fe338e7bc","sha512":"895f1edaf11cca877b6ba6ce35f19b48cdf5527fabe697a8c11290d33d464204642527eb26581853c8b89fa69dd1a3dfabf940bb60a7d9ae99a55e9c45c19257"},"length":26868228,"released":"2020-05-22T14:35:59+08:00","url":"/tidb-v3.1.0-beta-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"47fd44d4c68195bec2bae4de912b2d6b3dc58cd74faa83f166d2939dcd2398d4","sha512":"7eb26d17f38e919b8970b687c1266c5cf3dec9db8ac7910597826fc45406d6aeec506bd7690309acc927c9b3c39d50400e92155a527464be2638428828047843"},"length":27077345,"released":"2020-05-22T15:22:30+08:00","url":"/tidb-v3.1.0-beta.1-linux-amd64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"cf2079cd886c3878db85a6484083d926f73562dcb93444e89efc9e444f534b8c","sha512":"97baba009d34171b882d46e16af1ad1524f56c2ef10cc3cc9fbeabe874367bad10f312701e751348ddc9bbc07a7957010fe932baab7cc558eb855c171ded9ec4"},"length":27717224,"released":"2020-05-22T15:28:20+08:00","url":"/tidb-v3.1.0-beta.2-linux-amd64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"88dca8e755713b9e5f21deab0eb0c7687f668ff48c1839ffa1d346b854721840","sha512":"5030bc57db3c0a002
b9f955170f8a4f3233a334c27761b8c60c2b12280d128323ef6fbf5566ec851c7eeebc03c3c4d0dd0d2d8a750f3cd191a6a9227d5297416"},"length":27840449,"released":"2020-05-22T15:56:23+08:00","url":"/tidb-v3.1.0-rc-linux-amd64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bd7b1e1cbafac6b4875fef0360083dff74ee65cbcd72d2e4da7ea84c47f83f6c","sha512":"fbc8ce3048f7b09ecda269b891fb0acff320682711e59d11886674f22194cc5b9b2259ba4deac7d1beccf61a7cf29e3fb322b749eb41aa260f2970fd13852496"},"length":28015426,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-linux-amd64.tar.gz","yanked":false},"v3.1.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"981128adf9ccaf62c13fb677b9467265ff36c76adbafb3fd6f774e61d6879827","sha512":"c3818ac13639b3e7da8f232fa9c773dc27ad742d5fc27aeb27b5ac4a1b80c5439df0baa2a2b7bb77f6f4b7c8cd16b59c04d4b906b2c5a650106370021c6b7bfd"},"length":28016399,"released":"2020-06-04T17:53:39+08:00","url":"/tidb-v3.1.2-linux-amd64.tar.gz","yanked":false},"v4.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"48edaf41220e07199c2f842ebbf52a2eb543b6c8beebb1f21270694e4dbc3fa1"},"length":40481686,"released":"2020-05-28T16:23:23+08:00","url":"/tidb-v4.0.0-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"7e6ebdebb1af2135119fc1cb34f1253b88f563f8b9e299e8d69a714bf5809043","sha512":"11a719f45b2dc4572aa1d158679d15a1ad1f02188790f016f78e7b62fb0038fa30bfec2ccf8a75b26ab11bbf572598652866b568052f24b6180b1f7f0ca8697b"},"length":32294889,"released":"2020-05-26T11:18:05+08:00","url":"/tidb-v4.0.0-beta-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"a17b34e13d27f9a0bb6f4c3736162af1fbf6d423b9d6ddc402e53bd88e3feb0b","sha512":"5f3debdbf73ef3e0de0dbef2f79d2cdb2516df6fc03103239a4e649759ebfc583fd7f326f8793598c617b004de80087bb90553a237b40b028809c927c1b787c8"},"length":33118956,"released":"2020-05-26T11:42
:48+08:00","url":"/tidb-v4.0.0-beta.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"c454f5aa16f94b36b834d1bd7be9fef0767738c9ccc1273b834b6bab04f073b8","sha512":"e936487500fd1b646a0a8f46ba0afc5032dd983c45789af71f918d269329b7d7fcaa9859a17c43eddd0676f1bf6100fbb3977d1ba3e54105725c54219b2fc162"},"length":33940011,"released":"2020-05-26T11:56:51+08:00","url":"/tidb-v4.0.0-beta.2-linux-amd64.tar.gz","yanked":false},"v4.0.0-beta.2-nightly-20200629":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"93bde8b1259abadf19240b570f4f5523f5f781b12421c6a669856d4376a5d513","sha512":"4a50c14754b1b29fc423d81434dde00304110b036c4a2940250e94e05836ea87f10a5b890d443b27b6a6f999a115f4a8db86ca300b26a6129e9b64bb94c5c0ec"},"length":43922053,"released":"2020-06-29T04:57:29+08:00","url":"/tidb-v4.0.0-beta.2-nightly-20200629-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"46b0db34ccc02d34663667c58032f158cd093acdc79da5c080eff30221ed0072","sha512":"4f65c55109b3dcce316196599974d51da06c3da71948171937233187dfcc2f27a2f52c2900dbc84507ec0efdd2efa7293463a402117ceaa4e3623b44c0ce25e7"},"length":34051221,"released":"2020-05-26T14:56:06+08:00","url":"/tidb-v4.0.0-rc-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"2d5fab2bcc8793d85447ad2769eac8fca58d25de685039296e30cee0d0798137","sha512":"71ebc639cb23bd9671696ed43c974be6ab74781cf6543e5f3ff35ae69fac882557b6b49f8df8c7ee7cb5abeaef28b71f8edbb02d921a1aa2af6d58a33fc36b6d"},"length":34431514,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-linux-amd64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0bcd84a48071c4059519e64e743f40e6435670db0c4d9050f11b48136543a6ff","sha512":"f36c63ca590b624a0aea518f5141624b13c6d36b20f5ab0c0c973723cd481fee51753aa1b538a7fc1a81a3fb409830ed98576f0eb4d13b1bbcf31ce5eed6759
6"},"length":40116238,"released":"2020-05-15T21:54:51+08:00","url":"/tidb-v4.0.0-rc.2-linux-amd64.tar.gz","yanked":false},"v4.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"b80edea55cdd3bfaa79b5e0d25ae23e96decd574913801c8f5265b9b7af81d11","sha512":"ae83c1b94139c873e56940e2f872f54c4b6013c3c0d521a9176389bb52f860ea6a5cf6566a30b2d45273a2b339f25747fb30eb5ce7549fc5624acab722da75db"},"length":40473132,"released":"2020-06-15T12:00:45+08:00","url":"/tidb-v4.0.1-linux-amd64.tar.gz","yanked":false},"v4.0.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"32178adcfebabd3a49099c905f4389fdcc446e69c64a19bb7431782f83d7b956","sha512":"b1d561978d34f879660083081bfcf29cc546967cdcbbb85e7d8bd45f49323f128bc53d2fda907ac4735669ea406acd088d97dcd528de55e95b4954fb3746e2e9"},"length":40735884,"released":"2020-07-01T19:57:14+08:00","url":"/tidb-v4.0.2-linux-amd64.tar.gz","yanked":false}},"linux/arm64":{"v3.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"a90257c97d3c2ad4591899bee76328d2ca7db5654f55a39f5258bde1ddd025bc","sha512":"443dd0b778d2ac455687db8a891b88523061e86d8d7ea5c96f15bfacd8022a83565bac6af9e4f1cd99843f209bb08aee57b3e16cfe1b0321fe46286ccc620109"},"length":24791080,"released":"2020-04-27T19:38:36+08:00","url":"/tidb-v3.0.1-linux-arm64.tar.gz","yanked":false},"v3.0.14":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"0d303763b988d9262f5c7a750212b61786c56234823bde988922c1a205cdef13","sha512":"1db89fc07db4258aea659d832eb642561f59cb71de3eee14a52422889bf6c0ee03c99b27d7ffad20251b3267a680cc487f814f887e5a28e63ee69cc9f3ac9e6c"},"length":25904695,"released":"2020-05-09T21:11:49+08:00","url":"/tidb-v3.0.14-linux-arm64.tar.gz","yanked":false},"v3.0.15":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"900c48cb13bf9c20d52051ff2e935d7c01c706673a517b0292f9ca0dd36cef05","sha512":"96651d9e8bb5267900b3e415a3dc53deb0c6515ca62e508c8d93a4b202402cb571a6050355e6f6eb52dd5649a76b0d73b6c4f0e536bed7cc1624503b57ade3c6"},"length":259
55262,"released":"2020-06-05T16:53:32+08:00","url":"/tidb-v3.0.15-linux-arm64.tar.gz","yanked":false},"v3.1.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e48df49c5b12ec301a2e5c8f526f82d02ddacf86084068aa7c7d7bf00f1d43ef","sha512":"0eeeff903bc436734c4d7d5fbea965bca29befa84043e4e800d85335582a11ee5162a6850236a8e67fb0cd71d551b8d72f8ec3014c3cec3b6c0043ecfbc906bd"},"length":26492125,"released":"2020-05-22T15:34:33+08:00","url":"/tidb-v3.1.0-linux-arm64.tar.gz","yanked":false},"v3.1.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"02e3256c53eef4dbbdd3575f10d5697dcea1ce9e4e139383d777bcc83869260d","sha512":"705d4d1a02b7657200539d41f7c0a45aef9ed09e784991de366d232aa3b5c0774408e16a2e6b91588c2256ede3786b565141493b3076ceee4d9fc99b058564ba"},"length":25629984,"released":"2020-05-22T14:35:59+08:00","url":"/tidb-v3.1.0-beta-linux-arm64.tar.gz","yanked":false},"v3.1.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"e5484ded8d48e13ad25b600e5fa3f66c634de3bfc1624208143ee90ace3578f7","sha512":"8b566e03037a173d5c468e488e9faeae2a00d02bc7a4f464ffa30b45e81201c622843c001ae52139786530a13799ab9dcec94edbdb3a3bc7a4bc4e02ad63f446"},"length":25852605,"released":"2020-05-22T15:22:30+08:00","url":"/tidb-v3.1.0-beta.1-linux-arm64.tar.gz","yanked":false},"v3.1.0-beta.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"15498fbad839b1939ceb5a9339b74f349b77e4d26fc5f57815f33958f15d344e","sha512":"8a941309a0edb7e070bce86ca988d7174a262be377720157dac7d5df131caea34116fdbe376bdee196bd8ff8813fe39d796b849cedf4f7c6be91cdfc9a0ff636"},"length":26427881,"released":"2020-05-22T15:28:20+08:00","url":"/tidb-v3.1.0-beta.2-linux-arm64.tar.gz","yanked":false},"v3.1.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"150f9e56a4d20f3abc2f0995e5ff1e9b71ba40162db0af42912e11238ec1bcc5","sha512":"82b22c14f18b41277d214b0c5def64fa96dcd376c3135debdfc69267e9821737daa679a8993afedbbd46552b02542d804b176a662854c04f6613fce95356be5d"},"length":2
6548117,"released":"2020-05-22T15:56:23+08:00","url":"/tidb-v3.1.0-rc-linux-arm64.tar.gz","yanked":false},"v3.1.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"35209970a3014f3d655c220acf6259b2fe16c28f5efdfb61150681070e736a5e","sha512":"5149ee0260dd877fdd13821e243ddc8fc156475ca4a3fe0c04b066836c38170cff3f2c1611fbffde920e6d9833718a8bd174258073e1762ec7a1238c1c7ea77d"},"length":26595507,"released":"2020-04-30T21:02:32+08:00","url":"/tidb-v3.1.1-linux-arm64.tar.gz","yanked":false},"v3.1.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"bcdb3cb0e2fd75337a019b4a94d9f324d37cd069f0123a0074bae54595e04312","sha512":"5bfe1a5fde54a9e3328a7e5389b2469cec6326621c9d9964189535870714c004983d16e92e50b30d47e3e6a31784fec7311c4dc54773943d9ec07516a9b5d985"},"length":26594970,"released":"2020-06-04T17:55:40+08:00","url":"/tidb-v3.1.2-linux-arm64.tar.gz","yanked":false},"v4.0.0":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"ad1bf23b8eec2ab65e3e41bc1970ecaa0ca4d60979f45e48a74e83ddb6606426"},"length":38318862,"released":"2020-05-28T20:10:11+08:00","url":"/tidb-v4.0.0-linux-arm64.tar.gz","yanked":false},"v4.0.0-beta":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"f2eceda6a197735070308c5900c06b2c44f5fcb919a245b52a706b64219614f6","sha512":"2b4ea9e2034a4b6948ef61e36907ba6ba1706b5b6204b67b033b5fdd9b19edc3a61f27c5714d22d7b08524ab07b259f23d4d98bbd48ad3acda31dd647d315919"},"length":30628110,"released":"2020-05-26T11:18:05+08:00","url":"/tidb-v4.0.0-beta-linux-arm64.tar.gz","yanked":false},"v4.0.0-beta.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"5f091a07d8f1570c2998a69cb58470ec157a461024ca0691dc77c84b6c6be8b6","sha512":"6c0e3b4f7d7d44c9749eab89f366b604cec7c0fdb2ffb4e899f57af993e424ab695493b4b7e07ab7fa2d55996dab6d84878396f4780068208d1ece978414353a"},"length":31396287,"released":"2020-05-26T11:42:48+08:00","url":"/tidb-v4.0.0-beta.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-beta.2":{"dependencies":null,"entry":"tidb
-server","hashes":{"sha256":"0db43d917e73e4151aa5a009c8370a5e18c350e38d324ece844367d2db097358","sha512":"4cac617b54b6245f85c323a686ae7fe3125a2233c2946f08a3620cb66dbf3738c9916215ef255cd9c583e57d81df74388a789547a5d568f74785019d59f6f41c"},"length":32162424,"released":"2020-05-26T11:56:51+08:00","url":"/tidb-v4.0.0-beta.2-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"964ff7932ac73c77389e0230bd883a9ea124954e08ed186ae3da683e644fb15f","sha512":"979dff98cda72a6d2ab2b0973afce48f597dbfe4ad6f547ff5dfefff5657bdd1d7786c91c290e1a4a7ad1148061d53ecde195256c4d16a6f9e3ece9ad1ceab32"},"length":32274928,"released":"2020-05-26T14:56:06+08:00","url":"/tidb-v4.0.0-rc-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"1964979c80f03e20dda9c6e3b5abbc77fe8345c8a8b4bb2c15aaef92a72f2136","sha512":"218cb429a237e3d6d9e2dd7bc43b605a0dad5bae5a2a1cc0af3e8f7336b85ea54502a5729e8034c540c9c6b3eca97dc49c40124c106ea0c15335d2cdb2bc6fc9"},"length":32626370,"released":"2020-04-29T01:03:31+08:00","url":"/tidb-v4.0.0-rc.1-linux-arm64.tar.gz","yanked":false},"v4.0.0-rc.2":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"63297933955cdfb166073722b4d2c1f95191f36e25dae977223889c3123f71c3","sha512":"56e1b8d267e658d4b58c6695976df01836386070cf5acc23cbc566f0e0102143381b85569f744c735eeddba9d7272243849e8e90d484d644347fa1b0a86bef91"},"length":38034242,"released":"2020-05-15T21:54:51+08:00","url":"/tidb-v4.0.0-rc.2-linux-arm64.tar.gz","yanked":false},"v4.0.1":{"dependencies":null,"entry":"tidb-server","hashes":{"sha256":"340ee4a4871b8e560ec4c8cdfaf2e4b0c8e90eb8cc72fc5e90129c27286562e5","sha512":"ad2d18321be9d4a6cd71853954bb356a12318e7b4cd357f978324bf9434e4ecd82244c80bc3b005855990e18104ae2fee1d87b5aefcab175a5b70f822ba1d404"},"length":38328065,"released":"2020-06-12T21:21:12+08:00","url":"/tidb-v4.0.1-linux-arm64.tar.gz","yanked":false},"v4.0.2":{"dependencies":null,"entry":"tidb-server
","hashes":{"sha256":"1d46c0579b9aa7f8328c6d7c03f7a042e2aaf01e1864880eacb5a96a02fbe226","sha512":"d344877e3d62b4b9caa56281915eb548dd531e4bc84e57fb3132ece26c6839bf2bb0ff5b7483e640b6c3223ffb5a1ea1b1684fd5e78d51886457296f96027d25"},"length":38566695,"released":"2020-07-01T19:59:39+08:00","url":"/tidb-v4.0.2-linux-arm64.tar.gz","yanked":false}}},"spec_version":"0.1.0","version":62}}tiup-1.16.3/pkg/repository/v1manifest/testdata/polluted/manifests/timestamp.json000066400000000000000000000012511505422223000301620ustar00rootroot00000000000000{"signatures":[{"keyid":"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd","sig":"V8MgDDCmfVb8N0O3unbAno8q6i2Ag1Sbr/3n12Odk8McKzZaif7OcDm1IZB5J3o7ajsBF1tduTrcO7OijJQvx8l9i6aZi9J1lb/eJpYsyvQWdzd/T7osdRkEIhtM4/sGFjGslOolTFmpA/U5IkJ+FWAi38YaFPRn8bfIPLGniRAYs4/qjLBB3RgBUlDIIVvTiJIHEHtf3Bqb5LjpEjW4XhmDK94LJbKUqfO/6oDnQzI6Rot7zBWwDQVrIHakvQxoqA5c2jtMHCXSdX9cN7aRrNO4csggMzvQot7K0JYYszlroXnsL2ioNMgcPhtoEaMLW9mFjmdgR0j1//n1mxtdWA=="}],"signed":{"_type":"timestamp","expires":"2000-08-01T14:47:48+08:00","meta":{"/snapshot.json":{"hashes":{"sha256":"24c9fa83f15eda0683999b98ac0ff87fb95aed91c10410891fb38313f38e35c1"},"length":1760}},"spec_version":"0.1.0","version":639}}tiup-1.16.3/pkg/repository/v1manifest/types.go000066400000000000000000000202241505422223000213260ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package v1manifest import ( "encoding/json" "errors" "fmt" "time" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/mod/semver" ) // Manifest representation for set/de. type Manifest struct { // Signatures value Signatures []Signature `json:"signatures"` // Signed value; any value here must have the SignedBase base. Signed ValidManifest `json:"signed"` } // RawManifest representation for set/de. type RawManifest struct { // Signatures value Signatures []Signature `json:"signatures"` // Signed value; raw json message Signed json.RawMessage `json:"signed"` } // Signature represents a signature for a manifest type Signature struct { KeyID string `json:"keyid"` Sig string `json:"sig"` } // SignedBase represents parts of a manifest's signed value which are shared by all manifests. type SignedBase struct { Ty string `json:"_type"` SpecVersion string `json:"spec_version"` Expires string `json:"expires"` // 0 => no version specified Version uint `json:"version"` } // SetExpiresAt set manifest expires at the specified time. func (s *SignedBase) SetExpiresAt(t time.Time) { s.Expires = t.Format(time.RFC3339) } // ValidManifest is a manifest which includes SignedBase and can be validated. type ValidManifest interface { isValid() error // Base returns this manifest's SignedBase which is values common to all manifests. Base() *SignedBase // Filename returns the unversioned name that the manifest should be saved as based on its Go type. Filename() string } // Root manifest. type Root struct { SignedBase Roles map[string]*Role `json:"roles"` } // Role object. type Role struct { URL string `json:"url"` Keys map[string]*KeyInfo `json:"keys"` Threshold uint `json:"threshold"` } // KeyInfo is the manifest structure of a single key type KeyInfo struct { Type string `json:"keytype"` Value map[string]string `json:"keyval"` Scheme string `json:"scheme"` } // Index manifest. 
type Index struct { SignedBase Owners map[string]Owner `json:"owners"` Components map[string]ComponentItem `json:"components"` DefaultComponents []string `json:"default_components"` } // Owner object. type Owner struct { Name string `json:"name"` Keys map[string]*KeyInfo `json:"keys"` Threshold int `json:"threshold"` } // VersionItem is the manifest structure of a version of a component type VersionItem struct { URL string `json:"url"` Yanked bool `json:"yanked"` Entry string `json:"entry"` Released string `json:"released"` Dependencies map[string]string `json:"dependencies"` FileHash } // Component manifest. type Component struct { SignedBase ID string `json:"id"` Description string `json:"description"` Nightly string `json:"nightly"` // version of the latest daily build // platform -> version -> VersionItem Platforms map[string]map[string]VersionItem `json:"platforms"` } // ComponentItem object type ComponentItem struct { Yanked bool `json:"yanked"` Owner string `json:"owner"` URL string `json:"url"` Standalone bool `json:"standalone"` Hidden bool `json:"hidden"` } // Snapshot manifest. type Snapshot struct { SignedBase Meta map[string]FileVersion `json:"meta"` } // Timestamp manifest. type Timestamp struct { SignedBase Meta map[string]FileHash `json:"meta"` } // FileHash is the hashes and length of a file. type FileHash struct { Hashes map[string]string `json:"hashes"` Length uint `json:"length"` } // FileVersion is just a version number. type FileVersion struct { Version uint `json:"version"` Length uint `json:"length"` } // Boilerplate implementations of ValidManifest. 
// Base implements ValidManifest func (manifest *Root) Base() *SignedBase { return &manifest.SignedBase } // Base implements ValidManifest func (manifest *Index) Base() *SignedBase { return &manifest.SignedBase } // Base implements ValidManifest func (manifest *Component) Base() *SignedBase { return &manifest.SignedBase } // Base implements ValidManifest func (manifest *Snapshot) Base() *SignedBase { return &manifest.SignedBase } // Base implements ValidManifest func (manifest *Timestamp) Base() *SignedBase { return &manifest.SignedBase } // Filename implements ValidManifest func (manifest *Root) Filename() string { return ManifestFilenameRoot } // Filename implements ValidManifest func (manifest *Index) Filename() string { return ManifestFilenameIndex } // ComponentList returns non-yanked components func (manifest *Index) ComponentList() map[string]ComponentItem { components := make(map[string]ComponentItem) for n, c := range manifest.Components { if c.Yanked { continue } components[n] = c } return components } // ComponentListWithYanked return all components func (manifest *Index) ComponentListWithYanked() map[string]ComponentItem { return manifest.Components } // Filename implements ValidManifest func (manifest *Component) Filename() string { return manifest.ID + ".json" } // Filename implements ValidManifest func (manifest *Snapshot) Filename() string { return ManifestFilenameSnapshot } // Filename implements ValidManifest func (manifest *Timestamp) Filename() string { return ManifestFilenameTimestamp } // HasNightly return true if the component has nightly version. 
func (manifest *Component) HasNightly(platform string) bool { if manifest.Nightly == "" { return false } v, ok := manifest.Platforms[platform][manifest.Nightly] if !ok { return false } return !v.Yanked } // VersionList return all versions exclude yanked versions func (manifest *Component) VersionList(platform string) map[string]VersionItem { versions := make(map[string]VersionItem) vs := manifest.VersionListWithYanked(platform) if vs == nil { return nil } for v, vi := range vs { if vi.Yanked { continue } versions[v] = vi } return versions } // LatestVersion return the latest version exclude yanked versions func (manifest *Component) LatestVersion(platform string) string { versions := manifest.VersionList(platform) if versions == nil { return "" } var last string var lastStable string for v := range versions { if utils.Version(v).IsNightly() { continue } if last == "" || semver.Compare(last, v) < 0 { last = v } if semver.Prerelease(v) == "" && (lastStable == "" || semver.Compare(lastStable, v) < 0) { lastStable = v } } if lastStable == "" { return last } return lastStable } // VersionListWithYanked return all versions include yanked versions func (manifest *Component) VersionListWithYanked(platform string) map[string]VersionItem { if manifest == nil { return nil } vs, ok := manifest.Platforms[platform] if !ok { vs, ok = manifest.Platforms[AnyPlatform] if !ok { return nil } } return vs } // ErrLoadManifest is an empty object of LoadManifestError, useful for type check var ErrLoadManifest = &LoadManifestError{} // LoadManifestError is the error type used when loading manifest fails type LoadManifestError struct { manifest string // manifest name err error // wrapped error } // Error implements the error interface func (e *LoadManifestError) Error() string { return fmt.Sprintf( "error loading manifest %s: %s", e.manifest, e.err, ) } // Unwrap implements the error interface func (e *LoadManifestError) Unwrap() error { return e.err } // Is implements the error interface 
func (e *LoadManifestError) Is(target error) bool { t, ok := target.(*LoadManifestError) if !ok { return false } return (e.manifest == t.manifest || t.manifest == "") && (errors.Is(e.err, t.err) || t.err == nil) } tiup-1.16.3/pkg/repository/v1manifest/types_test.go000066400000000000000000000063101505422223000223650ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package v1manifest import ( "errors" "fmt" "testing" "github.com/stretchr/testify/require" ) func TestComponentList(t *testing.T) { manifest := &Index{ Components: map[string]ComponentItem{ "comp1": {}, "comp2": {Yanked: true}, }, } list := manifest.ComponentList() require.Equal(t, len(list), 1) _, ok := list["comp1"] require.True(t, ok) list = manifest.ComponentListWithYanked() require.Equal(t, len(list), 2) _, ok = list["comp1"] require.True(t, ok) _, ok = list["comp2"] require.True(t, ok) } func TestVersionList(t *testing.T) { manifest := &Component{ Platforms: map[string]map[string]VersionItem{ "linux/amd64": { "v1.0.0": {Entry: "test"}, "v1.1.1": {Entry: "test", Yanked: true}, }, "any/any": { "v1.0.0": {Entry: "test"}, "v1.1.1": {Entry: "test", Yanked: true}, }, }, } versions := manifest.VersionList("linux/amd64") require.Equal(t, len(versions), 1) _, ok := versions["v1.0.0"] require.True(t, ok) versions = manifest.VersionListWithYanked("linux/amd64") require.Equal(t, len(versions), 2) _, ok = versions["v1.0.0"] require.True(t, ok) _, ok = versions["v1.1.1"] require.True(t, ok) versions = 
manifest.VersionList("windows/amd64") require.Equal(t, len(versions), 1) _, ok = versions["v1.0.0"] require.True(t, ok) manifest = &Component{ Platforms: map[string]map[string]VersionItem{ "linux/amd64": { "v1.0.0": {Entry: "test"}, "v1.1.1": {Entry: "test", Yanked: true}, }, }, } versions = manifest.VersionList("windows/amd64") require.Equal(t, len(versions), 0) } func TestLoadManifestError(t *testing.T) { err0 := &LoadManifestError{ manifest: "root.json", err: fmt.Errorf("dummy error"), } // identical errors are equal require.True(t, errors.Is(err0, err0)) require.True(t, errors.Is(ErrLoadManifest, ErrLoadManifest)) require.True(t, errors.Is(ErrLoadManifest, &LoadManifestError{})) require.True(t, errors.Is(&LoadManifestError{}, ErrLoadManifest)) // not equal for different error types require.False(t, errors.Is(err0, errors.New(""))) // default Value matches any error require.True(t, errors.Is(err0, ErrLoadManifest)) // error with values are not matching default ones require.False(t, errors.Is(ErrLoadManifest, err0)) err1 := &LoadManifestError{ manifest: "root.json", err: fmt.Errorf("dummy error 2"), } require.True(t, errors.Is(err1, ErrLoadManifest)) // errors with different errors are different require.False(t, errors.Is(err0, err1)) require.False(t, errors.Is(err1, err0)) err2 := &LoadManifestError{ manifest: "root.json", } // nil errors can be match with any error, but not vise vera require.True(t, errors.Is(err1, err2)) require.False(t, errors.Is(err2, err1)) } tiup-1.16.3/pkg/set/000077500000000000000000000000001505422223000141325ustar00rootroot00000000000000tiup-1.16.3/pkg/set/any_set.go000066400000000000000000000037161505422223000161320ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package set import "slices" // AnySet is a set stores any type AnySet struct { eq func(a any, b any) bool slice []any } // NewAnySet builds a AnySet func NewAnySet(eq func(a any, b any) bool, aa ...any) *AnySet { slice := []any{} out: for _, a := range aa { for _, b := range slice { if eq(a, b) { continue out } } slice = append(slice, a) } return &AnySet{eq, slice} } // Exist checks whether `val` exists in `s`. func (s *AnySet) Exist(val any) bool { for _, a := range s.slice { if s.eq(a, val) { return true } } return false } // Insert inserts `val` into `s`. func (s *AnySet) Insert(val any) { if !s.Exist(val) { s.slice = append(s.slice, val) } } // Intersection returns the intersection of two sets func (s *AnySet) Intersection(rhs *AnySet) *AnySet { newSet := NewAnySet(s.eq) for elt := range rhs.slice { if s.Exist(elt) { newSet.Insert(elt) } } return newSet } // Remove removes `val` from `s` func (s *AnySet) Remove(val any) { for i, a := range s.slice { if s.eq(a, val) { s.slice = slices.Delete(s.slice, i, i+1) return } } } // Difference returns the difference of two sets func (s *AnySet) Difference(rhs *AnySet) *AnySet { newSet := NewAnySet(s.eq) diffSet := NewAnySet(s.eq, rhs.slice...) for elt := range s.slice { if !diffSet.Exist(elt) { newSet.Insert(elt) } } return newSet } // Slice converts the set to a slice func (s *AnySet) Slice() []any { return slices.Clone(s.slice) } tiup-1.16.3/pkg/set/any_set_test.go000066400000000000000000000015431505422223000171650ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package set import ( "reflect" "testing" "github.com/stretchr/testify/require" ) func TestAnySet(t *testing.T) { set := NewAnySet(reflect.DeepEqual) set.Insert(true) set.Insert(9527) require.Equal(t, true, set.slice[0]) require.Equal(t, true, set.Slice()[0]) require.Equal(t, 9527, set.slice[1]) require.Equal(t, 9527, set.Slice()[1]) } tiup-1.16.3/pkg/set/string_set.go000066400000000000000000000034521505422223000166460ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package set // StringSet is a string set. type StringSet map[string]struct{} // NewStringSet builds a string set. func NewStringSet(ss ...string) StringSet { set := make(StringSet) for _, s := range ss { set.Insert(s) } return set } // Exist checks whether `val` exists in `s`. func (s StringSet) Exist(val string) bool { _, ok := s[val] return ok } // Insert inserts `val` into `s`. func (s StringSet) Insert(val string) { s[val] = struct{}{} } // Join add all elements of `add` to `s`. 
func (s StringSet) Join(add StringSet) StringSet { for elt := range add { s.Insert(elt) } return s } // Intersection returns the intersection of two sets func (s StringSet) Intersection(rhs StringSet) StringSet { newSet := NewStringSet() for elt := range s { if rhs.Exist(elt) { newSet.Insert(elt) } } return newSet } // Remove removes `val` from `s` func (s StringSet) Remove(val string) { delete(s, val) } // Difference returns the difference of two sets func (s StringSet) Difference(rhs StringSet) StringSet { newSet := NewStringSet() for elt := range s { if !rhs.Exist(elt) { newSet.Insert(elt) } } return newSet } // Slice converts the set to a slice func (s StringSet) Slice() []string { res := make([]string, 0) for val := range s { res = append(res, val) } return res } tiup-1.16.3/pkg/set/string_set_test.go000066400000000000000000000030421505422223000177000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package set import ( "fmt" "testing" "github.com/stretchr/testify/require" ) func TestStringSet(t *testing.T) { set := NewStringSet() vals := []string{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"} for i := range vals { set.Insert(vals[i]) set.Insert(vals[i]) set.Insert(vals[i]) set.Insert(vals[i]) set.Insert(vals[i]) } require.Equal(t, len(vals), len(set)) for i := range vals { require.True(t, set.Exist(vals[i])) } require.False(t, set.Exist("11")) set = NewStringSet("1", "2", "3", "4", "5", "6") for i := 1; i < 7; i++ { require.True(t, set.Exist(fmt.Sprintf("%d", i))) } require.False(t, set.Exist("7")) s1 := NewStringSet("1", "2", "3") s2 := NewStringSet("4", "2", "3") s3 := s1.Intersection(s2) require.Equal(t, NewStringSet("2", "3"), s3) s4 := NewStringSet("4", "5", "3") require.Equal(t, NewStringSet("3"), s3.Intersection(s4)) s5 := NewStringSet("4", "5") require.Equal(t, NewStringSet(), s3.Intersection(s5)) s6 := NewStringSet() require.Equal(t, NewStringSet(), s3.Intersection(s6)) } tiup-1.16.3/pkg/tidbver/000077500000000000000000000000001505422223000147765ustar00rootroot00000000000000tiup-1.16.3/pkg/tidbver/tidbver.go000066400000000000000000000177361505422223000170020ustar00rootroot00000000000000// Copyright 2022 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package tidbver import ( "strings" "golang.org/x/mod/semver" ) // warning: invalid semantic version string is considered less than a valid one when using semver.Compare // TiDBSupportSecureBoot return if given version of TiDB support secure boot func TiDBSupportSecureBoot(version string) bool { return semver.Compare(version, "v5.3.0") >= 0 || strings.Contains(version, "nightly") } // TiDBSupportTiproxy return if given version of TiDB support tiproxy func TiDBSupportTiproxy(version string) bool { return semver.Compare(version, "v6.4.0") >= 0 || strings.Contains(version, "nightly") } // TiDBSupportUpgradeAPI return if given version of TiDB support upgrade API func TiDBSupportUpgradeAPI(version string) bool { return semver.Compare(version, "v7.4.0") >= 0 || (semver.MajorMinor(version) == "v7.1" && semver.Compare(version, "v7.1.2") >= 0) || strings.Contains(version, "nightly") } // TiKVSupportAdvertiseStatusAddr return if given version of TiKV support --advertise-status-addr func TiKVSupportAdvertiseStatusAddr(version string) bool { // TiKV support --advertise-status-addr since v4.0.1 return semver.Compare(version, "v4.0.1") >= 0 || strings.Contains(version, "nightly") } // TiFlashSupportTLS return if given version of TiFlash support TLS func TiFlashSupportTLS(version string) bool { return semver.Compare(version, "v4.0.5") >= 0 || strings.Contains(version, "nightly") } // TiFlashSupportAdvertiseStatusAddr return if given version of TiFlash support --advertise-status-addr func TiFlashSupportAdvertiseStatusAddr(version string) bool { // TiFlash support --advertise-status-addr since v4.0.5 return semver.Compare(version, "v4.0.5") >= 0 || strings.Contains(version, "nightly") } // TiFlashSupportMultiDisksDeployment return if given version of TiFlash support multi-disks deployment func TiFlashSupportMultiDisksDeployment(version string) bool { // https://github.com/pingcap/tiup/pull/931 return semver.Compare(version, "v4.0.9") >= 0 || strings.Contains(version, "nightly") } // 
TiFlashRequireCPUFlagAVX2 return if given version of TiFlash requires AVX2 CPU flags func TiFlashRequireCPUFlagAVX2(version string) bool { // https://github.com/pingcap/tiup/pull/2054 return semver.Compare(version, "v6.3.0") >= 0 || strings.Contains(version, "nightly") } // TiFlashDeprecatedUsersConfig return if given version of TiFlash deprecated users.* config func TiFlashDeprecatedUsersConfig(version string) bool { // https://github.com/pingcap/tiup/pull/1211 return semver.Compare(version, "v4.0.12") >= 0 && version != "v5.0.0-rc" || strings.Contains(version, "nightly") } // TiFlashNotNeedHTTPPortConfig return if given version of TiFlash do not need http_port config func TiFlashNotNeedHTTPPortConfig(version string) bool { return semver.Compare(version, "v7.1.0") >= 0 || strings.Contains(version, "nightly") } // TiFlashRequiresTCPPortConfig return if given version of TiFlash requires tcp_port config. // TiFlash 7.1.0 and later versions won't listen to tpc_port if the config is not given, which is recommended. // However this config is required for pre-7.1.0 versions because TiFlash will listen to it anyway, // and we must make sure the port is being configured as specified in the topology file, // otherwise multiple TiFlash instances will conflict. func TiFlashRequiresTCPPortConfig(version string) bool { return semver.Compare(version, "v7.1.0") < 0 && !strings.Contains(version, "nightly") } // TiFlashNotNeedSomeConfig return if given version of TiFlash do not need some config like runAsDaemon func TiFlashNotNeedSomeConfig(version string) bool { // https://github.com/pingcap/tiup/pull/1673 return semver.Compare(version, "v5.4.0") >= 0 || strings.Contains(version, "nightly") } // TiFlashPlaygroundNewStartMode return true if the given version of TiFlash could be started // with the new implementation in TiUP playground. 
func TiFlashPlaygroundNewStartMode(version string) bool { return semver.Compare(version, "v7.1.0") >= 0 || strings.Contains(version, "nightly") } // PDSupportMicroservices returns true if the given version of PD supports microservices. func PDSupportMicroservices(version string) bool { return semver.Compare(version, "v7.3.0") >= 0 || strings.Contains(version, "nightly") } // PDSupportMicroservicesWithName return if the given version of PD supports microservices with name. func PDSupportMicroservicesWithName(version string) bool { return semver.Compare(version, "v8.3.0") >= 0 || strings.Contains(version, "nightly") } // PDSupportReadyAPI return if the given version of PD supports new ready api for checking initial region loading. func PDSupportReadyAPI(version string) bool { return semver.Compare(version, "v8.5.2") >= 0 || strings.Contains(version, "nightly") } // TiCDCSupportConfigFile return if given version of TiCDC support config file func TiCDCSupportConfigFile(version string) bool { // config support since v4.0.13, ignore v5.0.0-rc return semver.Compare(version, "v4.0.13") >= 0 && version != "v5.0.0-rc" || strings.Contains(version, "nightly") } // TiCDCSupportSortOrDataDir return if given version of TiCDC support --sort-dir or --data-dir func TiCDCSupportSortOrDataDir(version string) bool { // config support since v4.0.13, ignore v5.0.0-rc return semver.Compare(version, "v4.0.13") >= 0 && version != "v5.0.0-rc" || strings.Contains(version, "nightly") } // TiCDCSupportDataDir return if given version of TiCDC support --data-dir func TiCDCSupportDataDir(version string) bool { // TiCDC support --data-dir since v4.0.14 and v5.0.3 if semver.Compare(version, "v5.0.3") >= 0 || strings.Contains(version, "nightly") { return true } return semver.Major(version) == "v4" && semver.Compare(version, "v4.0.14") >= 0 } // TiCDCSupportClusterID return if the given version of TiCDC support --cluster-id param to identify TiCDC cluster func TiCDCSupportClusterID(version string) 
bool { return semver.Compare(version, "v6.2.0") >= 0 || strings.Contains(version, "nightly") } // TiCDCSupportRollingUpgrade return if the given version of TiCDC support rolling upgrade // TiCDC support graceful rolling upgrade since v6.3.0 func TiCDCSupportRollingUpgrade(version string) bool { return semver.Compare(version, "v6.3.0") >= 0 || strings.Contains(version, "nightly") } // TiCDCUpgradeBeforePDTiKVTiDB return if the given version of TiCDC should upgrade TiCDC before PD and TiKV func TiCDCUpgradeBeforePDTiKVTiDB(version string) bool { return semver.Compare(version, "v5.1.0") >= 0 || strings.Contains(version, "nightly") } // NgMonitorDeployByDefault return if given version of TiDB cluster should contain ng-monitoring func NgMonitorDeployByDefault(version string) bool { return semver.Compare(version, "v5.4.0") >= 0 || strings.Contains(version, "nightly") } // PrometheusHasTiKVAccelerateRules return if given version of Prometheus has TiKV accelerateRules func PrometheusHasTiKVAccelerateRules(version string) bool { // tikv.accelerate.rules.yml was first introduced in v4.0.0 return semver.Compare(version, "v4.0.0") >= 0 || strings.Contains(version, "nightly") } // DMSupportDeploy return if given version of DM is supported bu tiup-dm func DMSupportDeploy(version string) bool { // tiup-dm only support version not less than v2.0 return semver.Compare(version, "v2.0.0") >= 0 || strings.Contains(version, "nightly") } // TiKVCDCSupportDeploy return if given version of TiDB/TiKV cluster is supported func TiKVCDCSupportDeploy(version string) bool { // TiKV-CDC only support TiKV version not less than v6.2.0 return semver.Compare(version, "v6.2.0") >= 0 || strings.Contains(version, "nightly") } tiup-1.16.3/pkg/tui/000077500000000000000000000000001505422223000141405ustar00rootroot00000000000000tiup-1.16.3/pkg/tui/cliutil.go000066400000000000000000000123531505422223000161400ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package tui import ( "bytes" "fmt" "os" "path/filepath" "strings" "text/template" "github.com/joomcode/errorx" "github.com/pingcap/tiup/pkg/localdata" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) var ( errNS = errorx.NewNamespace("tui") errMismatchArgs = errNS.NewType("mismatch_args", utils.ErrTraitPreCheck) errOperationAbort = errNS.NewType("operation_aborted", utils.ErrTraitPreCheck) ) var templateFuncs = template.FuncMap{ "OsArgs": OsArgs, "OsArgs0": OsArgs0, } // FIXME: We should use TiUP's arg0 instead of hardcode var arg0 = "tiup cluster" // RegisterArg0 register arg0 func RegisterArg0(s string) { arg0 = s } func args() []string { // if running in TiUP component mode if wd := os.Getenv(localdata.EnvNameTiUPVersion); wd != "" { return append([]string{arg0}, os.Args[1:]...) } return os.Args } // OsArgs return the whole command line that user inputs, e.g. tiup deploy --xxx, or tiup cluster deploy --xxx func OsArgs() string { return strings.Join(args(), " ") } // OsArgs0 return the command name that user inputs, e.g. tiup, or tiup cluster. func OsArgs0() string { if strings.Contains(args()[0], " ") { return args()[0] } return filepath.Base(args()[0]) } func init() { AddColorFunctions(func(name string, f any) { templateFuncs[name] = f }) } // CheckCommandArgsAndMayPrintHelp checks whether user passes enough number of arguments. // If insufficient number of arguments are passed, an error with proper suggestion will be raised. 
// When no argument is passed, command help will be printed and no error will be raised. func CheckCommandArgsAndMayPrintHelp(cmd *cobra.Command, args []string, minArgs int) (shouldContinue bool, err error) { if minArgs == 0 { return true, nil } lenArgs := len(args) if lenArgs == 0 { return false, cmd.Help() } if lenArgs < minArgs { return false, errMismatchArgs. New("Expect at least %d arguments, but received %d arguments", minArgs, lenArgs). WithProperty(SuggestionFromString(cmd.UsageString())) } return true, nil } func formatSuggestion(templateStr string, data any) string { t := template.Must(template.New("suggestion").Funcs(templateFuncs).Parse(templateStr)) var buf bytes.Buffer if err := t.Execute(&buf, data); err != nil { panic(err) } return buf.String() } // SuggestionFromString creates a suggestion from string. // Usage: SomeErrorX.WithProperty(SuggestionFromString(..)) func SuggestionFromString(str string) (errorx.Property, string) { return utils.ErrPropSuggestion, strings.TrimSpace(str) } // SuggestionFromTemplate creates a suggestion from go template. Colorize function and some other utilities // are available. // Usage: SomeErrorX.WithProperty(SuggestionFromTemplate(..)) func SuggestionFromTemplate(templateStr string, data any) (errorx.Property, string) { return SuggestionFromString(formatSuggestion(templateStr, data)) } // SuggestionFromFormat creates a suggestion from a format. // Usage: SomeErrorX.WithProperty(SuggestionFromFormat(..)) func SuggestionFromFormat(format string, a ...any) (errorx.Property, string) { s := fmt.Sprintf(format, a...) return SuggestionFromString(s) } // BeautifyCobraUsageAndHelp beautifies cobra usages and help. 
func BeautifyCobraUsageAndHelp(rootCmd *cobra.Command) { s := `Usage:{{if .Runnable}} {{ColorCommand}}{{tiupCmdLine .UseLine}}{{ColorReset}}{{end}}{{if .HasAvailableSubCommands}} {{ColorCommand}}{{tiupCmdPath .Use}} [command]{{ColorReset}}{{end}}{{if gt (len .Aliases) 0}} Aliases: {{ColorCommand}}{{.NameAndAliases}}{{ColorReset}}{{end}}{{if .HasExample}} Examples: {{.Example}}{{end}}{{if .HasAvailableSubCommands}} Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} Flags: {{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} Global Flags: {{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} Use "{{ColorCommand}}{{tiupCmdPath .Use}} help [command]{{ColorReset}}" for more information about a command.{{end}} ` cobra.AddTemplateFunc("tiupCmdLine", cmdLine) cobra.AddTemplateFunc("tiupCmdPath", cmdPath) rootCmd.SetUsageTemplate(s) } // cmdLine is a customized cobra.Command.UseLine() func cmdLine(useline string) string { i := strings.Index(useline, " ") if i > 0 { return OsArgs0() + useline[i:] } return useline } // cmdPath is a customized cobra.Command.CommandPath() func cmdPath(use string) string { if strings.Contains(use, " ") { use = OsArgs0() } return use } tiup-1.16.3/pkg/tui/color.go000066400000000000000000000047661505422223000156220ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package tui // A set of predefined color palettes. You should only reference a color in this palette so that a color // change can take effect globally. import ( "strings" "github.com/fatih/color" "github.com/spf13/cobra" ) var ( // ColorErrorMsg is the ansi color formatter for error messages ColorErrorMsg = color.New(color.FgHiRed) // ColorSuccessMsg is the ansi color formatter for success messages ColorSuccessMsg = color.New(color.FgHiGreen) // ColorWarningMsg is the ansi color formatter for warning messages ColorWarningMsg = color.New(color.FgHiYellow) // ColorCommand is the ansi color formatter for commands ColorCommand = color.New(color.FgHiBlue, color.Bold) // ColorKeyword is the ansi color formatter for cluster name ColorKeyword = color.New(color.FgHiBlue, color.Bold) ) func newColorizeFn(c *color.Color) func() string { const sep = "----" seq := c.Sprint(sep) if len(seq) == len(sep) { return func() string { return "" } } colorSeq := strings.Split(seq, sep)[0] return func() string { return colorSeq } } func newColorResetFn() func() string { const sep = "----" seq := color.New(color.FgWhite).Sprint(sep) if len(seq) == len(sep) { return func() string { return "" } } colorResetSeq := strings.Split(seq, sep)[1] return func() string { return colorResetSeq } } // AddColorFunctions invokes callback for each colorize functions. 
func AddColorFunctions(addCallback func(string, any)) { addCallback("ColorErrorMsg", newColorizeFn(ColorErrorMsg)) addCallback("ColorSuccessMsg", newColorizeFn(ColorSuccessMsg)) addCallback("ColorWarningMsg", newColorizeFn(ColorWarningMsg)) addCallback("ColorCommand", newColorizeFn(ColorCommand)) addCallback("ColorKeyword", newColorizeFn(ColorKeyword)) addCallback("ColorReset", newColorResetFn()) } // AddColorFunctionsForCobra adds colorize functions to cobra, so that they can be used in usage or help. func AddColorFunctionsForCobra() { AddColorFunctions(func(name string, f any) { cobra.AddTemplateFunc(name, f) }) } tiup-1.16.3/pkg/tui/colorstr/000077500000000000000000000000001505422223000160075ustar00rootroot00000000000000tiup-1.16.3/pkg/tui/colorstr/builtin_tokens.go000066400000000000000000000014251505422223000213710ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package colorstr import ( "fmt" "github.com/mitchellh/colorstring" ) func init() { // Register tiup specific color tokens DefaultTokens.Colors["tiup_command"] = fmt.Sprintf("%s;%s", colorstring.DefaultColors["light_cyan"], colorstring.DefaultColors["bold"], ) } tiup-1.16.3/pkg/tui/colorstr/color.go000066400000000000000000000063601505422223000174610ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. // Package colorstr interprets the input format containing color tokens like `[blue]hello [red]world` // as the text "hello world" in two colors. // // Just like tokens in the fmt package (e.g. '%s'), color tokens will only be effective when specified // as the format parameter. Tokens not in the format parameter will not be interpreted. // // colorstr.DefaultTokens.Printf("[blue]hello") ==> (a blue hello) // colorstr.DefaultTokens.Printf("[ahh]") ==> "[ahh]" // // Color tokens in the Print arguments will never be interpreted. It can be useful to pass user inputs there. package colorstr import ( "fmt" "io" "github.com/mitchellh/colorstring" ) type colorTokens struct { colorstring.Colorize } // Note: Print, Println, Fprint, Fprintln are intentionally not implemented, as we would like to // limit the usage of color token to be only placed in the "format" part. // Printf is a convenience wrapper for fmt.Printf with support for color codes. // Only color codes in the format param will be respected. func (c colorTokens) Printf(format string, a ...any) (n int, err error) { return fmt.Printf(c.Color(format), a...) } // Fprintf is a convenience wrapper for fmt.Fprintf with support for color codes. // Only color codes in the format param will be respected. func (c colorTokens) Fprintf(w io.Writer, format string, a ...any) (n int, err error) { return fmt.Fprintf(w, c.Color(format), a...) } // Sprintf is a convenience wrapper for fmt.Sprintf with support for color codes. // Only color codes in the format param will be respected. func (c colorTokens) Sprintf(format string, a ...any) string { return fmt.Sprintf(c.Color(format), a...) 
} // DefaultTokens uses default color tokens. var DefaultTokens = (func() colorTokens { // TODO: Respect NO_COLOR env // TODO: Add more color tokens here colors := make(map[string]string) for k, v := range colorstring.DefaultColors { colors[k] = v } return colorTokens{ Colorize: colorstring.Colorize{ Colors: colors, Disable: false, Reset: true, }, } })() // Printf is a convenience wrapper for fmt.Printf with support for color codes. // Only color codes in the format param will be respected. func Printf(format string, a ...any) (n int, err error) { return DefaultTokens.Printf(format, a...) } // Fprintf is a convenience wrapper for fmt.Fprintf with support for color codes. // Only color codes in the format param will be respected. func Fprintf(w io.Writer, format string, a ...any) (n int, err error) { return DefaultTokens.Fprintf(w, format, a...) } // Sprintf is a convenience wrapper for fmt.Sprintf with support for color codes. // Only color codes in the format param will be respected. func Sprintf(format string, a ...any) string { return DefaultTokens.Sprintf(format, a...) } tiup-1.16.3/pkg/tui/colorstr/color_test.go000066400000000000000000000024211505422223000205120ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package colorstr import ( "testing" "github.com/stretchr/testify/require" ) func TestDefaultTokens(t *testing.T) { require.Equal(t, "[blue", DefaultTokens.Sprintf("[blue")) require.Equal(t, "hello", DefaultTokens.Sprintf("hello")) require.Equal(t, "\x1B[34mhello\x1B[0m", DefaultTokens.Sprintf("[blue]hello")) require.Equal(t, "\x1B[34mhello\x1B[0m\x1B[0m", DefaultTokens.Sprintf("[blue]hello[reset]")) require.Equal(t, "\x1B[34mhello\x1B[0mfoo\x1B[0m", DefaultTokens.Sprintf("[blue]hello[reset]foo")) require.Equal(t, "\x1B[34mhello\x1B[31mfoo\x1B[0m", DefaultTokens.Sprintf("[blue]hello[red]foo")) require.Equal(t, "\x1B[34mhello [blue]\x1B[0m", DefaultTokens.Sprintf("[blue]hello %s", "[blue]")) require.Equal(t, "[ahh]hello", DefaultTokens.Sprintf("[ahh]hello")) } tiup-1.16.3/pkg/tui/colorstr/test_util.go000066400000000000000000000022261505422223000203540ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package colorstr import ( "testing" "github.com/stretchr/testify/require" ) // RequireEqualColorToken compares whether the actual string is equal to the expected string after color processing. func RequireEqualColorToken(t *testing.T, expectColorTokens string, actualString string) { require.Equal(t, DefaultTokens.Color(expectColorTokens), actualString) } // RequireNotEqualColorToken compares whether the actual string is not equal to the expected string after color processing. 
func RequireNotEqualColorToken(t *testing.T, expectColorTokens string, actualString string) { require.NotEqual(t, DefaultTokens.Color(expectColorTokens), actualString) } tiup-1.16.3/pkg/tui/colorstr/test_util_test.go000066400000000000000000000016541505422223000214170ustar00rootroot00000000000000// Copyright 2024 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package colorstr import ( "testing" ) func TestRequireEqualColorToken(t *testing.T) { RequireEqualColorToken(t, "[red]hello", DefaultTokens.Color("[red]hello")) RequireNotEqualColorToken(t, "[yellow]hello", DefaultTokens.Color("[red]hello")) RequireEqualColorToken(t, "[red]hello[reset]", DefaultTokens.Color("[red]hello[reset]")) RequireNotEqualColorToken(t, "[red]hello", DefaultTokens.Color("[red]hello[reset]")) } tiup-1.16.3/pkg/tui/progress/000077500000000000000000000000001505422223000160045ustar00rootroot00000000000000tiup-1.16.3/pkg/tui/progress/display_props.go000066400000000000000000000044441505422223000212310ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package progress import ( "encoding/json" "fmt" "strings" ) // Mode determines how the progress bar is rendered type Mode int const ( // ModeSpinner renders a Spinner ModeSpinner Mode = iota // ModeProgress renders a ProgressBar. Not supported yet. ModeProgress // ModeDone renders as "Done" message. ModeDone // ModeError renders as "Error" message. ModeError ) // MarshalJSON implements JSON marshaler func (m Mode) MarshalJSON() ([]byte, error) { var s string switch m { case ModeSpinner: s = "running" case ModeProgress: s = "progress" case ModeDone: s = "done" case ModeError: s = "error" default: s = "unknown" } return json.Marshal(s) } // UnmarshalJSON implements JSON unmarshaler func (m *Mode) UnmarshalJSON(b []byte) error { var s string if err := json.Unmarshal(b, &s); err != nil { return err } switch strings.ToLower(s) { case "spinner", "running": // keep "spinner" for compatibility *m = ModeSpinner case "progress": *m = ModeProgress case "done": *m = ModeDone case "error": *m = ModeError default: panic("unknown mode") } return nil } // String implements string func (m Mode) String() string { var s string switch m { case ModeSpinner: s = "running" case ModeProgress: s = "progress" case ModeDone: s = "done" case ModeError: s = "error" default: s = "unknown" } return s } // DisplayProps controls the display of the progress bar. 
type DisplayProps struct { Prefix string `json:"prefix,omitempty"` Suffix string `json:"suffix,omitempty"` // If `Mode == Done / Error`, Suffix is not printed Mode Mode `json:"mode,omitempty"` Detail string `json:"detail,omitempty"` } // String implements string func (dp *DisplayProps) String() string { return fmt.Sprintf( "(%s) %s: %s", dp.Mode, dp.Prefix, dp.Suffix, ) } tiup-1.16.3/pkg/tui/progress/example_single_bar_test.go000066400000000000000000000027021505422223000232130ustar00rootroot00000000000000package progress_test import ( "errors" "strconv" "testing" "time" "github.com/pingcap/tiup/pkg/tui/progress" ) func ExampleSingleBar() { b := progress.NewSingleBar("Prefix") b.UpdateDisplay(&progress.DisplayProps{ Prefix: "Prefix", Suffix: "Suffix", }) n := 3 go func() { time.Sleep(time.Second) for i := range n { b.UpdateDisplay(&progress.DisplayProps{ Prefix: "Prefix" + strconv.Itoa(i), Suffix: "Suffix" + strconv.Itoa(i), }) time.Sleep(time.Second) } }() b.StartRenderLoop() time.Sleep(time.Second * time.Duration(n+1)) b.UpdateDisplay(&progress.DisplayProps{ Mode: progress.ModeDone, // Mode: progress.ModeError, Prefix: "Prefix", }) b.StopRenderLoop() } func ExampleSingleBar_err() { b := progress.NewSingleBar("Prefix") b.UpdateDisplay(&progress.DisplayProps{ Prefix: "Prefix", Suffix: "Suffix", }) n := 3 go func() { time.Sleep(time.Second) for i := range n { b.UpdateDisplay(&progress.DisplayProps{ Prefix: "Prefix" + strconv.Itoa(i), Suffix: "Suffix" + strconv.Itoa(i), }) time.Sleep(time.Second) } }() b.StartRenderLoop() time.Sleep(time.Second * time.Duration(n+1)) b.UpdateDisplay(&progress.DisplayProps{ Mode: progress.ModeError, Prefix: "Prefix", Detail: errors.New("expected failure").Error(), }) b.StopRenderLoop() } func TestExampleOutput(t *testing.T) { if !testing.Verbose() { return } ExampleSingleBar() ExampleSingleBar_err() } tiup-1.16.3/pkg/tui/progress/multi_bar.go000066400000000000000000000050641505422223000203160ustar00rootroot00000000000000// Copyright 
2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package progress import ( "bufio" "fmt" "os" "strings" "github.com/mattn/go-runewidth" ) // MultiBarItem controls a bar item inside MultiBar. type MultiBarItem struct { core singleBarCore } // UpdateDisplay updates the display property of this bar item. // This function is thread safe. func (i *MultiBarItem) UpdateDisplay(newDisplay *DisplayProps) { i.core.displayProps.Store(newDisplay) } // MultiBar renders multiple progress bars. type MultiBar struct { prefix string bars []*MultiBarItem renderer *renderer } // NewMultiBar creates a new MultiBar. func NewMultiBar(prefix string) *MultiBar { b := &MultiBar{ prefix: prefix, bars: make([]*MultiBarItem, 0), renderer: newRenderer(), } b.renderer.renderFn = b.render return b } // AddBar adds a new bar item. // This function is not thread safe. Must be called before render loop is started. func (b *MultiBar) AddBar(prefix string) *MultiBarItem { i := &MultiBarItem{ core: newSingleBarCore(prefix), } b.bars = append(b.bars, i) return i } // StartRenderLoop starts the render loop. // This function is thread safe. func (b *MultiBar) StartRenderLoop() { b.preRender() b.renderer.startRenderLoop() } // StopRenderLoop stops the render loop. // This function is thread safe. 
func (b *MultiBar) StopRenderLoop() { b.renderer.stopRenderLoop() } func (b *MultiBar) preRender() { // Preserve space for the bar fmt.Print(strings.Repeat("\n", len(b.bars)+1)) } func (b *MultiBar) render() { f := bufio.NewWriter(os.Stdout) y := int(termSizeHeight.Load()) - 1 movedY := 0 for i := len(b.bars) - 1; i >= 0; i-- { moveCursorUp(f, 1) y-- movedY++ bar := b.bars[i] moveCursorToLineStart(f) clearLine(f) bar.core.renderTo(f) if y == 0 { break } } // render multi bar prefix if y > 0 { moveCursorUp(f, 1) movedY++ moveCursorToLineStart(f) clearLine(f) width := int(termSizeWidth.Load()) prefix := runewidth.Truncate(b.prefix, width, "...") _, _ = fmt.Fprint(f, prefix) } moveCursorDown(f, movedY) moveCursorToLineStart(f) clearLine(f) _ = f.Flush() } tiup-1.16.3/pkg/tui/progress/progress.go000066400000000000000000000023441505422223000202020ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package progress import ( "fmt" "os" "time" "github.com/fatih/color" ) var ( spinnerText = []rune("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏") ) var ( colorDone = color.New(color.FgHiGreen) colorError = color.New(color.FgHiRed) colorSpinner = color.New(color.FgHiCyan) ) var refreshRate = time.Millisecond * 50 const ( doneTail = "Done" errorTail = "Error" ) func init() { v := os.Getenv("TIUP_CLUSTER_PROGRESS_REFRESH_RATE") if v != "" { d, err := time.ParseDuration(v) if err != nil { fmt.Println("ignore invalid refresh rate: ", v) return } refreshRate = d } } // Bar controls how a bar is displayed, for both single bar or multi bar item. type Bar interface { UpdateDisplay(newDisplay *DisplayProps) } tiup-1.16.3/pkg/tui/progress/renderer.go000066400000000000000000000031471505422223000201460ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package progress import ( "time" "go.uber.org/atomic" ) type renderer struct { isUpdaterRunning atomic.Bool stopChan chan struct{} stopFinishedChan chan struct{} renderFn func() } func newRenderer() *renderer { return &renderer{ isUpdaterRunning: atomic.Bool{}, stopChan: nil, stopFinishedChan: nil, renderFn: nil, } } func (r *renderer) startRenderLoop() { if r.renderFn == nil { panic("renderFn must be set") } if !r.isUpdaterRunning.CompareAndSwap(false, true) { return } r.stopChan = make(chan struct{}) r.stopFinishedChan = make(chan struct{}) go r.renderLoopFn() } func (r *renderer) stopRenderLoop() { if !r.isUpdaterRunning.CompareAndSwap(true, false) { return } r.stopChan <- struct{}{} close(r.stopChan) r.stopChan = nil <-r.stopFinishedChan close(r.stopFinishedChan) r.stopFinishedChan = nil } func (r *renderer) renderLoopFn() { ticker := time.NewTicker(refreshRate) defer ticker.Stop() for { select { case <-ticker.C: r.renderFn() case <-r.stopChan: r.renderFn() r.stopFinishedChan <- struct{}{} return } } } tiup-1.16.3/pkg/tui/progress/single_bar.go000066400000000000000000000076211505422223000204460ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package progress import ( "bufio" "fmt" "io" "os" "github.com/fatih/color" "github.com/mattn/go-runewidth" "go.uber.org/atomic" ) type singleBarCore struct { displayProps atomic.Value spinnerFrame int } func (b *singleBarCore) renderDoneOrError(w io.Writer, dp *DisplayProps) { width := int(termSizeWidth.Load()) var tail, detail string var tailColor *color.Color switch dp.Mode { case ModeDone: tail = doneTail tailColor = colorDone case ModeError: tail = errorTail tailColor = colorError default: panic("Unexpected dp.Mode") } var displayPrefix string midWidth := 1 + 3 + 1 + len(tail) prefixWidth := runewidth.StringWidth(dp.Prefix) if midWidth+prefixWidth <= width || midWidth > width { displayPrefix = dp.Prefix } else { displayPrefix = runewidth.Truncate(dp.Prefix, width-prefixWidth, "") } if len(dp.Detail) > 0 { detail = ": " + dp.Detail } _, _ = fmt.Fprintf(w, "%s ... %s%s", displayPrefix, tailColor.Sprint(tail), detail) } func (b *singleBarCore) renderSpinner(w io.Writer, dp *DisplayProps) { width := int(termSizeWidth.Load()) var displayPrefix, displaySuffix string midWidth := 1 + 3 + 1 + 1 + 1 prefixWidth := runewidth.StringWidth(dp.Prefix) suffixWidth := runewidth.StringWidth(dp.Suffix) switch { case midWidth+prefixWidth+suffixWidth <= width || midWidth > width: // If screen is too small, do not fit it any more. displayPrefix = dp.Prefix displaySuffix = dp.Suffix case midWidth+prefixWidth <= width: displayPrefix = dp.Prefix displaySuffix = runewidth.Truncate(dp.Suffix, width-midWidth-prefixWidth, "...") default: displayPrefix = runewidth.Truncate(dp.Prefix, width-midWidth, "") displaySuffix = "" } _, _ = fmt.Fprintf(w, "%s ... 
%s %s", displayPrefix, colorSpinner.Sprintf("%c", spinnerText[b.spinnerFrame]), displaySuffix) b.spinnerFrame = (b.spinnerFrame + 1) % len(spinnerText) } func (b *singleBarCore) renderTo(w io.Writer) { dp := (b.displayProps.Load()).(*DisplayProps) if dp.Mode == ModeDone || dp.Mode == ModeError { b.renderDoneOrError(w, dp) } else { b.renderSpinner(w, dp) } } func newSingleBarCore(prefix string) singleBarCore { c := singleBarCore{ displayProps: atomic.Value{}, spinnerFrame: 0, } c.displayProps.Store(&DisplayProps{ Prefix: prefix, Mode: ModeSpinner, }) return c } // SingleBar renders single progress bar. type SingleBar struct { core singleBarCore renderer *renderer } // NewSingleBar creates a new SingleBar. func NewSingleBar(prefix string) *SingleBar { b := &SingleBar{ core: newSingleBarCore(prefix), renderer: newRenderer(), } b.renderer.renderFn = b.render return b } // UpdateDisplay updates the display property of this single bar. // This function is thread safe. func (b *SingleBar) UpdateDisplay(newDisplay *DisplayProps) { b.core.displayProps.Store(newDisplay) } // StartRenderLoop starts the render loop. // This function is thread safe. func (b *SingleBar) StartRenderLoop() { b.preRender() b.renderer.startRenderLoop() } // StopRenderLoop stops the render loop. // This function is thread safe. func (b *SingleBar) StopRenderLoop() { b.renderer.stopRenderLoop() } func (b *SingleBar) preRender() { // Preserve space for the bar fmt.Println("") } func (b *SingleBar) render() { f := bufio.NewWriter(os.Stdout) moveCursorUp(f, 1) moveCursorToLineStart(f) clearLine(f) b.core.renderTo(f) moveCursorDown(f, 1) moveCursorToLineStart(f) _ = f.Flush() } tiup-1.16.3/pkg/tui/progress/terminal.go000066400000000000000000000026631505422223000201550ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package progress import ( "fmt" "io" "os" "os/signal" "syscall" "go.uber.org/atomic" "golang.org/x/sys/unix" ) var ( termSizeWidth = atomic.Int32{} termSizeHeight = atomic.Int32{} ) func updateTerminalSize() error { ws, err := unix.IoctlGetWinsize(syscall.Stdout, unix.TIOCGWINSZ) if err != nil { return err } termSizeWidth.Store(int32(ws.Col)) termSizeHeight.Store(int32(ws.Row)) return nil } func moveCursorUp(w io.Writer, n int) { _, _ = fmt.Fprintf(w, "\033[%dA", n) } func moveCursorDown(w io.Writer, n int) { _, _ = fmt.Fprintf(w, "\033[%dB", n) } func moveCursorToLineStart(w io.Writer) { _, _ = fmt.Fprintf(w, "\r") } func clearLine(w io.Writer) { _, _ = fmt.Fprintf(w, "\033[2K") } func init() { _ = updateTerminalSize() sigCh := make(chan os.Signal, 1) signal.Notify(sigCh, syscall.SIGWINCH) go func() { for { if _, ok := <-sigCh; !ok { return } _ = updateTerminalSize() } }() } tiup-1.16.3/pkg/tui/ssh.go000066400000000000000000000067271505422223000153000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package tui import ( "os" "github.com/ScaleFT/sshkeys" "github.com/pingcap/tiup/pkg/utils" "golang.org/x/crypto/ssh" ) var ( // ErrIdentityFileReadFailed is ErrIdentityFileReadFailed ErrIdentityFileReadFailed = errNS.NewType("id_read_failed", utils.ErrTraitPreCheck) ) // SSHConnectionProps is SSHConnectionProps type SSHConnectionProps struct { Password string IdentityFile string IdentityFilePassphrase string } // ReadIdentityFileOrPassword is ReadIdentityFileOrPassword func ReadIdentityFileOrPassword(identityFilePath string, usePass bool) (*SSHConnectionProps, error) { // If identity file is not specified, prompt to read password if usePass { password := PromptForPassword("Input SSH password: ") return &SSHConnectionProps{ Password: password, }, nil } // Identity file is specified, check identity file if len(identityFilePath) > 0 && utils.IsExist(identityFilePath) { buf, err := os.ReadFile(identityFilePath) if err != nil { return nil, ErrIdentityFileReadFailed. Wrap(err, "Failed to read SSH identity file '%s'", identityFilePath). WithProperty(SuggestionFromTemplate(` Please check whether your SSH identity file {{ColorKeyword}}{{.File}}{{ColorReset}} exists and have access permission. `, map[string]string{ "File": identityFilePath, })) } // Try to decode as not encrypted _, err = ssh.ParsePrivateKey(buf) if err == nil { return &SSHConnectionProps{ IdentityFile: identityFilePath, }, nil } // Other kind of error.. e.g. not a valid SSH key if _, ok := err.(*ssh.PassphraseMissingError); !ok { return nil, ErrIdentityFileReadFailed. Wrap(err, "Failed to read SSH identity file '%s'", identityFilePath). WithProperty(SuggestionFromTemplate(` Looks like your SSH private key {{ColorKeyword}}{{.File}}{{ColorReset}} is invalid. `, map[string]string{ "File": identityFilePath, })) } // SSH key is passphrase protected passphrase := PromptForPassword("The SSH identity key is encrypted. 
Input its passphrase: ") if _, err := sshkeys.ParseEncryptedPrivateKey(buf, []byte(passphrase)); err != nil { return nil, ErrIdentityFileReadFailed. Wrap(err, "Failed to decrypt SSH identity file '%s'", identityFilePath) } return &SSHConnectionProps{ IdentityFile: identityFilePath, IdentityFilePassphrase: passphrase, }, nil } // No password, nor identity file were specified, check ssh-agent via the env SSH_AUTH_SOCK sshAuthSock := os.Getenv("SSH_AUTH_SOCK") if len(sshAuthSock) == 0 { return nil, ErrIdentityFileReadFailed.New("none of ssh password, identity file, SSH_AUTH_SOCK specified") } stat, err := os.Stat(sshAuthSock) if err != nil { return nil, ErrIdentityFileReadFailed.Wrap(err, "Failed to stat SSH_AUTH_SOCK file: '%s'", sshAuthSock) } if stat.Mode()&os.ModeSocket == 0 { return nil, ErrIdentityFileReadFailed.New("The SSH_AUTH_SOCK file: '%s' is not a valid unix socket file", sshAuthSock) } return &SSHConnectionProps{}, nil } tiup-1.16.3/pkg/tui/tui.go000066400000000000000000000133231505422223000152720ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package tui import ( "bufio" "fmt" "os" "strings" "syscall" "github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" "github.com/fatih/color" "github.com/pingcap/tiup/pkg/utils/mock" "golang.org/x/term" ) // PrintTable accepts a matrix of strings and print them as ASCII table to terminal func PrintTable(rows [][]string, header bool) { if f := mock.On("PrintTable"); f != nil { f.(func([][]string, bool))(rows, header) return } // Print the table t := table.NewWriter() t.SetOutputMirror(os.Stdout) t.SuppressTrailingSpaces() if header { addRow(t, rows[0], true) border := make([]string, len(rows[0])) for i := range border { border[i] = strings.Repeat("-", len(rows[0][i])) } addRow(t, border, false) rows = rows[1:] } for _, row := range rows { addRow(t, row, false) } t.SetStyle(table.Style{ Name: "tiup", Box: table.BoxStyle{ BottomLeft: "", BottomRight: "", BottomSeparator: "", Left: "|", LeftSeparator: "|", MiddleHorizontal: "-", MiddleSeparator: " ", MiddleVertical: " ", PaddingLeft: "", PaddingRight: "", Right: "", RightSeparator: "", TopLeft: "", TopRight: "", TopSeparator: "", UnfinishedRow: "", }, Format: table.FormatOptions{ Header: text.FormatDefault, }, Options: table.Options{ SeparateColumns: true, }, }) t.Render() } func addRow(t table.Writer, rawLine []string, header bool) { // Convert []string to []any row := make(table.Row, len(rawLine)) for i, v := range rawLine { row[i] = v } // Add line to the table if header { t.AppendHeader(row) } else { t.AppendRow(row) } } // pre-defined ascii art strings const ( ASCIIArtWarning = ` ██ ██ █████ ██████ ███ ██ ██ ███ ██ ██████ ██ ██ ██ ██ ██ ██ ████ ██ ██ ████ ██ ██ ██ █ ██ ███████ ██████ ██ ██ ██ ██ ██ ██ ██ ██ ███ ██ ███ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ███ ███ ██ ██ ██ ██ ██ ████ ██ ██ ████ ██████ ` ) // Prompt accepts input from console by user func Prompt(prompt string) string { if prompt != "" { prompt += " " // append a whitespace } fmt.Print(prompt) reader := 
bufio.NewReader(os.Stdin) input, err := reader.ReadString('\n') if err != nil { return "" } return strings.TrimSuffix(input, "\n") } // PromptForConfirmYes accepts yes / no from console by user, default to No and only return true // if the user input is Yes func PromptForConfirmYes(format string, a ...any) (bool, string) { ans := Prompt(fmt.Sprintf(format, a...) + "(default=N)") switch strings.TrimSpace(strings.ToLower(ans)) { case "y", "yes": return true, ans default: return false, ans } } // PromptForConfirmNo accepts yes / no from console by user, default to Yes and only return true // if the user input is No func PromptForConfirmNo(format string, a ...any) (bool, string) { ans := Prompt(fmt.Sprintf(format, a...) + "(default=Y)") switch strings.TrimSpace(strings.ToLower(ans)) { case "n", "no": return true, ans default: return false, ans } } // PromptForConfirmOrAbortError accepts yes / no from console by user, generates AbortError if user does not input yes. func PromptForConfirmOrAbortError(format string, a ...any) error { if pass, ans := PromptForConfirmYes(format, a...); !pass { return errOperationAbort.New("Operation aborted by user (with answer '%s')", ans) } return nil } // PromptForConfirmAnswer accepts string from console by user, default to empty and only return // true if the user input is exactly the same as pre-defined answer. func PromptForConfirmAnswer(answer string, format string, a ...any) (bool, string) { ans := Prompt(fmt.Sprintf(format, a...) + fmt.Sprintf("\n(Type \"%s\" to continue)\n:", color.CyanString(answer))) if ans == answer { return true, ans } return false, ans } // PromptForAnswerOrAbortError accepts string from console by user, generates AbortError if user does // not input the pre-defined answer. 
func PromptForAnswerOrAbortError(answer string, format string, a ...any) error { if pass, ans := PromptForConfirmAnswer(answer, format, a...); !pass { return errOperationAbort.New("Operation aborted by user (with incorrect answer '%s')", ans) } return nil } // PromptForPassword reads a password input from console func PromptForPassword(format string, a ...any) string { defer fmt.Println("") fmt.Printf(format, a...) input, err := term.ReadPassword(syscall.Stdin) if err != nil { return "" } return strings.TrimSpace(strings.Trim(string(input), "\n")) } // OsArch builds an "os/arch" string from input, it converts some similar strings // to different words to avoid misreading when displaying in terminal func OsArch(os, arch string) string { osFmt := os archFmt := arch switch arch { case "amd64": archFmt = "x86_64" case "arm64": archFmt = "aarch64" } return fmt.Sprintf("%s/%s", osFmt, archFmt) } tiup-1.16.3/pkg/utils/000077500000000000000000000000001505422223000144775ustar00rootroot00000000000000tiup-1.16.3/pkg/utils/args.go000066400000000000000000000005531505422223000157650ustar00rootroot00000000000000package utils // RebuildArgs move "--help" or "-h" flag to the end of the arg list func RebuildArgs(args []string) []string { helpFlag := "--help" argList := []string{} for _, arg := range args { if arg == "-h" || arg == "--help" { helpFlag = arg } else { argList = append(argList, arg) } } argList = append(argList, helpFlag) return argList } tiup-1.16.3/pkg/utils/diff.go000066400000000000000000000115471505422223000157460ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "fmt" "io" "strconv" "strings" "github.com/pingcap/tiup/pkg/set" "github.com/r3labs/diff/v3" "github.com/sergi/go-diff/diffmatchpatch" ) const ( validateTagName = "validate" validateTagEditable = "editable" validateTagIgnore = "ignore" validateTagExpandable = "expandable" // r3labs/diff drops everything after the first ',' in the tag value, so we use a different // separator for the tag value and its options validateTagSeperator = ":" ) // ShowDiff write diff result into the Writer. // return false if there's no diff. func ShowDiff(t1 string, t2 string, w io.Writer) { dmp := diffmatchpatch.New() diffs := dmp.DiffMain(t1, t2, false) diffs = dmp.DiffCleanupSemantic(diffs) fmt.Fprint(w, dmp.DiffPrettyText(diffs)) } func validateExpandable(fromField, toField any) bool { fromStr, ok := fromField.(string) if !ok { return false } toStr, ok := toField.(string) if !ok { return false } tidyPaths := func(arr []string) []string { for i := range arr { arr[i] = strings.TrimSuffix(strings.TrimSpace(arr[i]), "/") } return arr } fromPaths := tidyPaths(strings.Split(fromStr, ",")) toPaths := tidyPaths(strings.Split(toStr, ",")) // The first path must be the same if len(fromPaths) > 0 && len(toPaths) > 0 && fromPaths[0] != toPaths[0] { return false } // The intersection size must be the same with from size fromSet := set.NewStringSet(fromPaths...) toSet := set.NewStringSet(toPaths...) 
inter := fromSet.Intersection(toSet) return len(inter) == len(fromSet) } // ValidateSpecDiff checks and validates the new spec to see if the modified // keys are all marked as editable func ValidateSpecDiff(s1, s2 any) error { differ, err := diff.NewDiffer( diff.TagName(validateTagName), diff.AllowTypeMismatch(true), ) if err != nil { return err } changelog, err := differ.Diff(s1, s2) if err != nil { return err } if len(changelog) == 0 { return nil } msg := make([]string, 0) for _, c := range changelog { if len(c.Path) > 0 { _, leafCtl := parseValidateTagValue(c.Path[len(c.Path)-1]) // c.Path will be the tag value if TagName matched on the field if c.Type == diff.UPDATE && leafCtl == validateTagEditable { // If the field is marked as editable, it is allowed to be modified no matter // its parent level element is marked as editable or not continue } pathEditable := true pathIgnore := false pathExpandable := false for _, p := range c.Path { key, ctl := parseValidateTagValue(p) if _, err := strconv.Atoi(key); err == nil { // ignore slice offset counts continue } if ctl == validateTagIgnore { pathIgnore = true continue } if ctl == validateTagExpandable { pathExpandable = validateExpandable(c.From, c.To) } if ctl != validateTagEditable { pathEditable = false } } // if the path has any ignorable item, just ignore it if pathIgnore || (pathEditable && (c.Type == diff.CREATE || c.Type == diff.DELETE)) || pathExpandable { // If *every* parent elements on the path are all marked as editable, // AND the field itself is marked as editable, it is allowed to add or delete continue } } // build error messages switch c.Type { case diff.CREATE: msg = append(msg, fmt.Sprintf("added %s with value '%v'", buildFieldPath(c.Path), c.To)) case diff.DELETE: msg = append(msg, fmt.Sprintf("removed %s with value '%v'", buildFieldPath(c.Path), c.From)) case diff.UPDATE: msg = append(msg, fmt.Sprintf("%s changed from '%v' to '%v'", buildFieldPath(c.Path), c.From, c.To)) } } if len(msg) > 0 { 
return fmt.Errorf("immutable field changed: %s", strings.Join(msg, ", ")) } return nil } func parseValidateTagValue(v string) (key, ctl string) { pvs := strings.Split(v, validateTagSeperator) switch len(pvs) { case 1: // if only one field is set in tag value // use it as both the field name and control command key = pvs[0] ctl = pvs[0] case 2: key = pvs[0] ctl = pvs[1] default: panic(fmt.Sprintf("invalid tag value %s for %s, only one or two fields allowed", v, validateTagName)) } return key, ctl } func buildFieldPath(rawPath []string) string { namedPath := make([]string, 0) for _, p := range rawPath { pvs := strings.Split(p, validateTagSeperator) if len(pvs) >= 1 { namedPath = append(namedPath, pvs[0]) } } return strings.Join(namedPath, ".") } tiup-1.16.3/pkg/utils/diff_test.go000066400000000000000000000301501505422223000167740ustar00rootroot00000000000000// you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package utils import ( "testing" "gopkg.in/yaml.v3" "github.com/stretchr/testify/require" ) type sampleDataMeta struct { IntSlice []int `yaml:"ints,omitempty"` StrSlice []string `yaml:"strs,omitempty" validate:"strs:editable"` MapSlice []map[string]any `yaml:"maps,omitempty" validate:"maps:ignore"` StrElem string `yaml:"stre" validate:"editable"` StrElem2 string `yaml:"str2,omitempty" validate:"str2:expandable"` StructSlice1 []sampleDataElem `yaml:"slice1" validate:"slice1:editable"` StructSlice2 []sampleDataElem `yaml:"slice2,omitempty"` StructSlice3 []sampleDataEditable `yaml:"slice3,omitempty" validate:"slice3:editable"` } type sampleDataElem struct { StrElem1 string `yaml:"str1" validate:"str1:editable"` StrElem2 string `yaml:"str2,omitempty" validate:"str2:editable"` IntElem int `yaml:"int"` InterfaceElem any `yaml:"interface,omitempty" validate:"interface:editable"` InterfaceSlice map[string]any `yaml:"mapslice,omitempty" validate:"mapslice:editable"` } type sampleDataEditable struct { StrElem1 string `yaml:"str1" validate:"str1:editable"` StrElem2 string `yaml:"str2,omitempty" validate:"str2:editable"` } func TestValidateSpecDiff1(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] strs: - str1 - "str2" `), &d1) require.NoError(t, err) // unchanged err = ValidateSpecDiff(d1, d1) require.NoError(t, err) // swap element order err = yaml.Unmarshal([]byte(` ints: [11, 13, 12] strs: - str2 - "str1" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // add editable element (without specifying alias) err = yaml.Unmarshal([]byte(` ints: [11, 13, 12] strs: - "str1" - str2 stre: "test1.3" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // add item to immutable element err = yaml.Unmarshal([]byte(` ints: [11, 12, 13, 14] `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) require.Equal(t, "immutable field 
changed: added IntSlice.3 with value '14'", err.Error()) } func TestValidateSpecDiff2(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 int: 42 interface: 11 - str1: strv12 str2: strv22 int: 42 interface: "12" `), &d1) require.NoError(t, err) // change editable field of item in editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv233 int: 42 interface: 11 - str1: strv12 str2: strv22 int: 42 interface: "12" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // change immutable field of item in editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 int: 42 interface: 11 - str1: strv12 str2: strv22 int: 43 interface: "12" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) require.Equal(t, "immutable field changed: slice1.1.IntElem changed from '42' to '43'", err.Error()) // Add item with immutable field to editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 int: 42 interface: 11 - str1: strv12 str2: strv22 int: 42 interface: "12" - str1: strv13 str2: strv23 int: 42 interface: "13" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) require.Equal(t, "immutable field changed: added slice1.2.IntElem with value '42'", err.Error()) // Delete item with immutable field from editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 int: 42 interface: 11 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) require.Equal(t, "immutable field changed: removed slice1.1.IntElem with value '42'", err.Error()) } func TestValidateSpecDiff3(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice2: - str1: strv11 str2: strv21 int: 
42 interface: 11 - str1: strv12 str2: strv22 int: 42 interface: "12" `), &d1) require.NoError(t, err) // change editable field of item in immutable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice2: - str1: strv11 str2: strv233 int: 42 interface: 11 - str1: strv12 str2: strv22 int: 42 interface: "12" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // change immutable field of item in immutable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice2: - str1: strv11 str2: strv21 int: 42 interface: 11 - str1: strv12 str2: strv22 int: 43 interface: "12" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) require.Equal(t, "immutable field changed: StructSlice2.1.IntElem changed from '42' to '43'", err.Error()) // Add item to immutable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice2: - str1: strv11 str2: strv21 int: 42 interface: 11 - str1: strv12 str2: strv22 int: 42 interface: "12" - str1: strv31 str2: strv32 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) require.Equal(t, "immutable field changed: added StructSlice2.2.str1 with value 'strv31', added StructSlice2.2.str2 with value 'strv32'", err.Error()) // Remove item from immutable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice2: - str1: strv11 str2: strv21 int: 42 interface: 11 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) require.Equal(t, "immutable field changed: removed StructSlice2.1.str1 with value 'strv12', removed StructSlice2.1.str2 with value 'strv22', removed StructSlice2.1.IntElem with value '42', removed StructSlice2.1.interface with value '12'", err.Error()) } func TestValidateSpecDiff4(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice3: - str1: strv11 str2: strv21 `), &d1) require.NoError(t, err) // Add item with only editable fields to editable 
slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice3: - str1: strv11 str2: strv21 - str1: strv21 str2: strv22 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Remove item with only editable fields from editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice3: - str1: strv21 str2: strv22 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) } func TestValidateSpecDiff5(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 interslice: - key0: 0 - str1: strv12 str2: strv22 slice2: - str1: strv13 str2: strv14 interslice: - key0: 0 `), &d1) require.NoError(t, err) // Modify item of editable slice in item of editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 interslice: - key0: 0.1 - str1: strv12 str2: strv22 interslice: - key1: 1 - key2: "v2" slice2: - str1: strv13 str2: strv14 interslice: - key0: 0 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Modify item of editable slice in item of editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 interslice: - key0: 0 - str1: strv12 str2: strv22 interslice: - key1: 1 - key2: "v2" slice2: - str1: strv13 str2: strv14 interslice: - key0: 0.2 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Add item to editable slice to item of editable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: strv11 str2: strv21 interslice: - key0: 0 - str1: strv12 str2: strv22 interslice: - key1: 1 - key2: "v2" slice2: - str1: strv13 str2: strv14 interslice: - key0: 0 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Add item to editable slice to item of immutable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice1: - str1: 
strv11 str2: strv21 interslice: - key0: 0 - str1: strv12 str2: strv22 slice2: - str1: strv13 str2: strv14 interslice: - key0: 0 - key3: 3.0 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) } func TestValidateSpecDiff6(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] maps: - key0: 0 - dot.key1: 1 - dotkey.subkey.1: "1" `), &d1) require.NoError(t, err) // Modify key without dot in name, in ignorable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] maps: - key0: 1 - dot.key1: 1 - dotkey.subkey.1: "1" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Modify key with one dot in name, in ignorable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] maps: - key0: 0 - dot.key1: 11 - dotkey.subkey.1: "1" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Modify key with two dots and number in name, in ignorable slice err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] maps: - key0: 0 - dot.key1: 1 - dotkey.subkey.1: "12" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) } func TestValidateSpecDiffType(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice3: - key0: 0 `), &d1) require.NoError(t, err) // Modify key in editable map, with the same type err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice3: - key0: 1 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Modify key in editable map, with value type changed err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice3: - key0: 2.0 `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Modify key in editable map, with value type changed err = yaml.Unmarshal([]byte(` ints: [11, 12, 13] slice3: - key0: sss `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) 
require.NoError(t, err) } func TestValidateSpecDiffExpandable(t *testing.T) { var d1, d2 sampleDataMeta var err error err = yaml.Unmarshal([]byte(` str2: "/ssd0/tiflash,/ssd1/tiflash" `), &d1) require.NoError(t, err) // Expand path err = yaml.Unmarshal([]byte(` str2: "/ssd0/tiflash,/ssd1/tiflash,/ssd2/tiflash" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Expand path with non-sorted paths err = yaml.Unmarshal([]byte(` str2: "/ssd0/tiflash,/ssd2/tiflash,/ssd1/tiflash" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.NoError(t, err) // Expand path with non-sorted paths. Changing the first path is not allowed. err = yaml.Unmarshal([]byte(` str2: "/ssd1/tiflash,/ssd0/tiflash,/ssd2/tiflash" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) // Shrinking paths is not allowed err = yaml.Unmarshal([]byte(` str2: "/ssd0/tiflash" `), &d2) require.NoError(t, err) err = ValidateSpecDiff(d1, d2) require.Error(t, err) } tiup-1.16.3/pkg/utils/edit.go000066400000000000000000000022151505422223000157530ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "os" "os/exec" ) // ref: https://samrapdev.com/capturing-sensitive-input-with-editor-in-golang-from-the-cli/ // DefaultEditor is vi because we're adults ;) const DefaultEditor = "vi" // OpenFileInEditor opens filename in a text editor. 
func OpenFileInEditor(filename string) error { editor := os.Getenv("EDITOR") if editor == "" { editor = DefaultEditor } // Get the full executable path for the editor. executable, err := exec.LookPath(editor) if err != nil { return err } cmd := exec.Command(executable, filename) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd.Run() } tiup-1.16.3/pkg/utils/error.go000066400000000000000000000034261505422223000161640ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "fmt" "github.com/joomcode/errorx" ) var ( // ErrPropSuggestion is a property of an Error that will be printed as the suggestion. ErrPropSuggestion = errorx.RegisterProperty("suggestion") // ErrTraitPreCheck means that the Error is a pre-check error so that no error logs will be outputted directly. 
ErrTraitPreCheck = errorx.RegisterTrait("pre_check") ) var ( // ErrValidateChecksum is an empty HashValidationErr object, useful for type checking ErrValidateChecksum = &HashValidationErr{} ) // HashValidationErr is the error indicates a failed hash validation type HashValidationErr struct { cipher string expect string // expected hash actual string // input hash } // Error implements the error interface func (e *HashValidationErr) Error() string { return fmt.Sprintf( "%s checksum mismatch, expect: %v, got: %v", e.cipher, e.expect, e.actual, ) } // Unwrap implements the error interface func (e *HashValidationErr) Unwrap() error { return nil } // Is implements the error interface func (e *HashValidationErr) Is(target error) bool { t, ok := target.(*HashValidationErr) if !ok { return false } return (e.cipher == t.cipher || t.cipher == "") && (e.expect == t.expect || t.expect == "") && (e.actual == t.actual || t.actual == "") } tiup-1.16.3/pkg/utils/error_test.go000066400000000000000000000042251505422223000172210ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package utils import ( "errors" "testing" "github.com/stretchr/testify/require" ) func TestHashValidationErr(t *testing.T) { err0 := &HashValidationErr{ cipher: "sha256", expect: "hash111", actual: "hash222", } // identical errors are equal require.True(t, errors.Is(err0, err0)) require.True(t, errors.Is(ErrValidateChecksum, ErrValidateChecksum)) require.True(t, errors.Is(ErrValidateChecksum, &HashValidationErr{})) require.True(t, errors.Is(&HashValidationErr{}, ErrValidateChecksum)) // not equal for different error types require.False(t, errors.Is(err0, errors.New(""))) // default Value matches any error require.True(t, errors.Is(err0, ErrValidateChecksum)) // error with values are not matching default ones require.False(t, errors.Is(ErrValidateChecksum, err0)) err1 := &HashValidationErr{ cipher: "sha256", expect: "hash111", actual: "hash222", } require.True(t, errors.Is(err1, ErrValidateChecksum)) // errors with same values are equal require.True(t, errors.Is(err0, err1)) require.True(t, errors.Is(err1, err0)) // errors with different ciphers are not equal err1.cipher = "sha512" require.False(t, errors.Is(err0, err1)) require.False(t, errors.Is(err1, err0)) // errors with different expected hashes are not equal err1.cipher = err0.cipher require.True(t, errors.Is(err0, err1)) err1.expect = "hash1112" require.False(t, errors.Is(err0, err1)) require.False(t, errors.Is(err1, err0)) // errors with different actual hashes are not equal err1.expect = err0.expect require.True(t, errors.Is(err0, err1)) err1.actual = "hash2223" require.False(t, errors.Is(err0, err1)) require.False(t, errors.Is(err1, err0)) } tiup-1.16.3/pkg/utils/freeport.go000066400000000000000000000033171505422223000166600ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "net" "sync" "time" ) // To avoid the same port be generated twice in a short time var portCache sync.Map func getFreePort(host string, defaultPort int) (int, error) { //revive:disable if port, err := getPort(host, defaultPort); err == nil { return port, nil } else if port, err := getPort(host, 0); err == nil { return port, nil } else { return 0, err } //revive:enable } // MustGetFreePort asks the kernel for a free open port that is ready to use, if fail, panic func MustGetFreePort(host string, defaultPort int, portOffset int) int { bestPort := defaultPort + portOffset if port, err := getFreePort(host, bestPort); err == nil { return port } panic("can't get a free port") } func getPort(host string, port int) (int, error) { addr, err := net.ResolveTCPAddr("tcp", JoinHostPort(host, port)) if err != nil { return 0, err } l, err := net.ListenTCP("tcp", addr) if err != nil { return 0, err } port = l.Addr().(*net.TCPAddr).Port l.Close() key := JoinHostPort(host, port) if t, ok := portCache.Load(key); ok && t.(time.Time).Add(time.Minute).After(time.Now()) { return getPort(host, (port+1)%65536) } portCache.Store(key, time.Now()) return port, nil } tiup-1.16.3/pkg/utils/freeport_test.go000066400000000000000000000006441505422223000177170ustar00rootroot00000000000000package utils import ( "testing" "github.com/stretchr/testify/require" ) func TestGetFreePort(t *testing.T) { expected := 22334 port, err := getFreePort("127.0.0.1", expected) require.NoError(t, err) require.Equal(t, expected, port, "expect port %d", expected) port, err = getFreePort("127.0.0.1", expected) require.NoError(t, err) require.NotEqual(t, expected, 
port, "should not return same port twice") } tiup-1.16.3/pkg/utils/http.go000066400000000000000000000022221505422223000160030ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "bytes" "io" "mime/multipart" "net/http" ) // PostFile upload file func PostFile(reader io.Reader, url, fieldname, filename string) (*http.Response, error) { bodyBuf := &bytes.Buffer{} bodyWriter := multipart.NewWriter(bodyBuf) // this step is very important fileWriter, err := bodyWriter.CreateFormFile(fieldname, filename) if err != nil { return nil, err } _, err = io.Copy(fileWriter, reader) if err != nil { return nil, err } contentType := bodyWriter.FormDataContentType() bodyWriter.Close() resp, err := http.Post(url, contentType, bodyBuf) if err != nil { return nil, err } return resp, nil } tiup-1.16.3/pkg/utils/http_client.go000066400000000000000000000135371505422223000173540ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package utils import ( "context" "crypto/tls" "fmt" "io" "net" "net/http" "net/url" "os" "path/filepath" "time" ) // HTTPClient is a wrap of http.Client type HTTPClient struct { client *http.Client header http.Header } // NewHTTPClient returns a new HTTP client with timeout and HTTPS support func NewHTTPClient(timeout time.Duration, tlsConfig *tls.Config) *HTTPClient { if timeout < time.Second { timeout = 10 * time.Second // default timeout is 10s } tr := &http.Transport{ TLSClientConfig: tlsConfig, Dial: (&net.Dialer{Timeout: 3 * time.Second}).Dial, } // prefer to use the inner http proxy httpProxy := os.Getenv("TIUP_INNER_HTTP_PROXY") if len(httpProxy) == 0 { httpProxy = os.Getenv("HTTP_PROXY") } if len(httpProxy) > 0 { if proxyURL, err := url.Parse(httpProxy); err == nil { tr.Proxy = http.ProxyURL(proxyURL) } } return &HTTPClient{ client: &http.Client{ Timeout: timeout, Transport: tr, }, } } // SetRequestHeader set http request header func (c *HTTPClient) SetRequestHeader(key, value string) { if c.header == nil { c.header = http.Header{} } c.header.Add(key, value) } // Get fetch an URL with GET method and returns the response func (c *HTTPClient) Get(ctx context.Context, url string) ([]byte, error) { data, _, err := c.GetWithStatusCode(ctx, url) return data, err } // GetWithStatusCode fetch a URL with GET method and returns the response, also the status code. 
func (c *HTTPClient) GetWithStatusCode(ctx context.Context, url string) ([]byte, int, error) { var statusCode int req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, statusCode, err } req.Header = c.header if ctx != nil { req = req.WithContext(ctx) } res, err := c.client.Do(req) if err != nil { return nil, statusCode, err } defer res.Body.Close() data, err := checkHTTPResponse(res) return data, res.StatusCode, err } // Download fetch an URL with GET method and Download the response to filePath func (c *HTTPClient) Download(ctx context.Context, url, filePath string) error { // IsExist if IsExist(filePath) { return fmt.Errorf("target file %s already exists", filePath) } if err := MkdirAll(filepath.Dir(filePath), 0755); err != nil { return err } // create target file f, err := os.Create(filePath) if err != nil { return err } defer f.Close() req, err := http.NewRequest("GET", url, nil) if err != nil { return err } req.Header = c.header if ctx != nil { req = req.WithContext(ctx) } res, err := c.client.Do(req) if err != nil { return err } defer res.Body.Close() _, err = io.Copy(f, res.Body) if err != nil { return err } return nil } // Post send a POST request to the url and returns the response func (c *HTTPClient) Post(ctx context.Context, url string, body io.Reader) ([]byte, error) { data, _, err := c.PostWithStatusCode(ctx, url, body) return data, err } // PostWithStatusCode send a POST request to the url and returns the response, also the http status code. 
func (c *HTTPClient) PostWithStatusCode(ctx context.Context, url string, body io.Reader) ([]byte, int, error) { var statusCode int req, err := http.NewRequest("POST", url, body) if err != nil { return nil, statusCode, err } if c.header == nil { req.Header.Set("Content-Type", "application/json") } else { req.Header = c.header } if ctx != nil { req = req.WithContext(ctx) } res, err := c.client.Do(req) if err != nil { return nil, statusCode, err } defer res.Body.Close() data, err := checkHTTPResponse(res) return data, res.StatusCode, err } // Put send a PUT request to the url and returns the response, also the status code func (c *HTTPClient) Put(ctx context.Context, url string, body io.Reader) ([]byte, int, error) { var statusCode int req, err := http.NewRequest("PUT", url, body) if err != nil { return nil, statusCode, err } if c.header == nil { req.Header.Set("Content-Type", "application/json") } else { req.Header = c.header } if ctx != nil { req = req.WithContext(ctx) } resp, err := c.client.Do(req) if err != nil { return nil, statusCode, err } defer resp.Body.Close() b, err := checkHTTPResponse(resp) statusCode = resp.StatusCode return b, statusCode, err } // Delete send a DELETE request to the url and returns the response and status code. 
func (c *HTTPClient) Delete(ctx context.Context, url string, body io.Reader) ([]byte, int, error) { var statusCode int req, err := http.NewRequest("DELETE", url, body) if err != nil { return nil, statusCode, err } if ctx != nil { req = req.WithContext(ctx) } res, err := c.client.Do(req) if err != nil { return nil, statusCode, err } defer res.Body.Close() b, err := checkHTTPResponse(res) statusCode = res.StatusCode return b, statusCode, err } // Client returns the http.Client func (c *HTTPClient) Client() *http.Client { return c.client } // WithClient uses the specified HTTP client func (c *HTTPClient) WithClient(client *http.Client) *HTTPClient { c.client = client return c } // checkHTTPResponse checks if an HTTP response is with normal status codes func checkHTTPResponse(res *http.Response) ([]byte, error) { body, err := io.ReadAll(res.Body) if err != nil { return nil, err } if res.StatusCode < 200 || res.StatusCode >= 400 { return body, fmt.Errorf("error requesting %s, response: %s, code %d", res.Request.URL, string(body), res.StatusCode) } return body, nil } tiup-1.16.3/pkg/utils/ioutil.go000066400000000000000000000223421505422223000163360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package utils import ( "archive/tar" "bufio" "compress/gzip" "crypto/sha1" "encoding/hex" "io" "io/fs" "os" "path" "path/filepath" "strings" "sync" "syscall" "time" "github.com/pingcap/errors" ) var ( fileLocks = make(map[string]*sync.Mutex) filesLock = sync.Mutex{} ) // IsSymExist check whether a symbol link is exist func IsSymExist(path string) bool { _, err := os.Lstat(path) return !os.IsNotExist(err) } // IsExist check whether a path is exist func IsExist(path string) bool { _, err := os.Stat(path) return !os.IsNotExist(err) } // IsNotExist check whether a path is not exist func IsNotExist(path string) bool { _, err := os.Stat(path) return os.IsNotExist(err) } // IsEmptyDir check whether a path is an empty directory func IsEmptyDir(path string) (bool, error) { f, err := os.Open(path) if err != nil { return false, err } defer f.Close() _, err = f.Readdirnames(1) if err == io.EOF { return true, nil } return false, err } // IsExecBinary check whether a path is a valid executable func IsExecBinary(path string) bool { info, err := os.Stat(path) if err != nil { return false } return !info.IsDir() && info.Mode()&0o111 == 0o111 } // IsSubDir returns if sub is a sub directory of parent func IsSubDir(parent, sub string) bool { up := ".." + string(os.PathSeparator) rel, err := filepath.Rel(parent, sub) if err != nil { return false } if !strings.HasPrefix(rel, up) && rel != ".." { return true } return false } // Tar compresses the folder to tarball with gzip func Tar(writer io.Writer, from string) error { compressW := gzip.NewWriter(writer) defer compressW.Close() tarW := tar.NewWriter(compressW) defer tarW.Close() // NOTE: filepath.Walk does not follow the symbolic link. 
return filepath.Walk(from, func(path string, info fs.FileInfo, err error) error { if err != nil { return err } link := "" if info.Mode()&fs.ModeSymlink != 0 { link, err = os.Readlink(path) if err != nil { return err } } header, _ := tar.FileInfoHeader(info, link) header.Name, _ = filepath.Rel(from, path) // skip "." if header.Name == "." { return nil } err = tarW.WriteHeader(header) if err != nil { return err } if info.Mode().IsRegular() { fd, err := os.Open(path) if err != nil { return err } defer fd.Close() _, err = io.Copy(tarW, fd) return err } return nil }) } // Untar decompresses the tarball func Untar(reader io.Reader, to string) error { gr, err := gzip.NewReader(reader) if err != nil { return errors.Trace(err) } defer gr.Close() tr := tar.NewReader(gr) decFile := func(hdr *tar.Header) error { file := path.Join(to, hdr.Name) err := MkdirAll(filepath.Dir(file), 0o755) if err != nil { return err } fw, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, hdr.FileInfo().Mode()) if err != nil { return errors.Trace(err) } defer fw.Close() _, err = io.Copy(fw, tr) return errors.Trace(err) } for { hdr, err := tr.Next() if err == io.EOF { break } if err != nil { return errors.Trace(err) } switch hdr.Typeflag { case tar.TypeDir: if err := MkdirAll(path.Join(to, hdr.Name), hdr.FileInfo().Mode()); err != nil { return errors.Trace(err) } case tar.TypeSymlink: if err = os.Symlink(hdr.Linkname, filepath.Join(to, hdr.Name)); err != nil { return errors.Trace(err) } default: if err := decFile(hdr); err != nil { return errors.Trace(err) } } } return nil } // Copy copies a file or directory from src to dst func Copy(src, dst string) error { fi, err := os.Stat(src) if err != nil { return err } if fi.IsDir() { // use os.CopyFS to copy a directory srcFS := os.DirFS(src) return os.CopyFS(dst, srcFS) } // for regular files in, err := os.Open(src) if err != nil { return err } defer in.Close() out, err := os.Create(dst) if err != nil { return err } defer out.Close() _, err = 
io.Copy(out, in) if err != nil { return err } err = out.Close() if err != nil { return err } err = os.Chmod(dst, fi.Mode()) if err != nil { return err } // Make sure the created dst's modify time is newer (at least equal) than src // this is used to workaround github action virtual filesystem ofi, err := os.Stat(dst) if err != nil { return err } if fi.ModTime().After(ofi.ModTime()) { return os.Chtimes(dst, fi.ModTime(), fi.ModTime()) } return nil } // Move moves a file from src to dst, this is done by copying the file and then // delete the old one. Use os.Rename() to rename file within the same filesystem // instead this, it's more lightweight but can not be used across devices. func Move(src, dst string) error { if err := Copy(src, dst); err != nil { return errors.Trace(err) } return errors.Trace(os.RemoveAll(src)) } // Checksum returns the sha1 sum of target file func Checksum(file string) (string, error) { tarball, err := os.OpenFile(file, os.O_RDONLY, 0) if err != nil { return "", err } defer tarball.Close() sha1Writter := sha1.New() if _, err := io.Copy(sha1Writter, tarball); err != nil { return "", err } checksum := hex.EncodeToString(sha1Writter.Sum(nil)) return checksum, nil } // TailN try get the latest n line of the file. 
func TailN(fname string, n int) (lines []string, err error) { file, err := os.Open(fname) if err != nil { return nil, errors.AddStack(err) } defer file.Close() estimateLineSize := 1024 stat, err := os.Stat(fname) if err != nil { return nil, errors.AddStack(err) } start := max(int(stat.Size())-n*estimateLineSize, 0) _, err = file.Seek(int64(start), 0 /*means relative to the origin of the file*/) if err != nil { return nil, errors.AddStack(err) } scanner := bufio.NewScanner(file) for scanner.Scan() { lines = append(lines, scanner.Text()) } if len(lines) > n { lines = lines[len(lines)-n:] } return } func fileLock(path string) *sync.Mutex { filesLock.Lock() defer filesLock.Unlock() if _, ok := fileLocks[path]; !ok { fileLocks[path] = &sync.Mutex{} } return fileLocks[path] } // SaveFileWithBackup will backup the file before save it. // e.g., backup meta.yaml as meta-2006-01-02T15:04:05Z07:00.yaml // backup the files in the same dir of path if backupDir is empty. func SaveFileWithBackup(path string, data []byte, backupDir string) error { fileLock(path).Lock() defer fileLock(path).Unlock() info, err := os.Stat(path) if err != nil && !os.IsNotExist(err) { return errors.AddStack(err) } if info != nil && info.IsDir() { return errors.Errorf("%s is directory", path) } // backup file if !os.IsNotExist(err) { base := filepath.Base(path) dir := filepath.Dir(path) var backupName string timestr := time.Now().Format(time.RFC3339Nano) p := strings.Split(base, ".") if len(p) == 1 { backupName = base + "-" + timestr } else { backupName = strings.Join(p[0:len(p)-1], ".") + "-" + timestr + "." 
+ p[len(p)-1] } backupData, err := os.ReadFile(path) if err != nil { return errors.AddStack(err) } var backupPath string if backupDir != "" { backupPath = filepath.Join(backupDir, backupName) } else { backupPath = filepath.Join(dir, backupName) } err = os.WriteFile(backupPath, backupData, 0o644) if err != nil { return errors.AddStack(err) } } err = os.WriteFile(path, data, 0o644) if err != nil { return errors.AddStack(err) } return nil } // MkdirAll basically copied from os.MkdirAll, but use max(parent permission,minPerm) func MkdirAll(path string, minPerm os.FileMode) error { // Fast path: if we can tell whether path is a directory or file, stop with success or error. dir, err := os.Stat(path) if err == nil { if dir.IsDir() { return nil } return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } // Slow path: make sure parent exists and then call Mkdir for path. i := len(path) for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. i-- } j := i for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. j-- } if j > 1 { // Create parent. err = MkdirAll(path[:j-1], minPerm) if err != nil { return err } } perm := minPerm fi, err := os.Stat(filepath.Dir(path)) if err == nil { perm |= fi.Mode().Perm() } // Parent now exists; invoke Mkdir and use its result; inheritance parent perm. err = os.Mkdir(path, perm) if err != nil { // Handle arguments like "foo/." by // double-checking that directory doesn't exist. 
dir, err1 := os.Lstat(path) if err1 == nil && dir.IsDir() { return nil } return err } return nil } // WriteFile call os.WriteFile, but use max(parent permission,minPerm) func WriteFile(name string, data []byte, perm os.FileMode) error { fi, err := os.Stat(filepath.Dir(name)) if err == nil { perm |= (fi.Mode().Perm() & 0o666) } return os.WriteFile(name, data, perm) } tiup-1.16.3/pkg/utils/ioutil_test.go000066400000000000000000000120231505422223000173700ustar00rootroot00000000000000package utils import ( "bytes" "math/rand" "os" "path" "path/filepath" "runtime" "sort" "strconv" "strings" "sync" "testing" "time" "github.com/google/uuid" "github.com/stretchr/testify/require" ) func TestMain(m *testing.M) { os.RemoveAll(path.Join(currentDir(), "testdata", "parent")) os.RemoveAll(path.Join(currentDir(), "testdata", "ssh-exec")) os.RemoveAll(path.Join(currentDir(), "testdata", "nop-nop")) m.Run() } func currentDir() string { _, file, _, _ := runtime.Caller(0) return filepath.Dir(file) } func TestIsExist(t *testing.T) { require.True(t, IsExist("/tmp")) require.False(t, IsExist("/tmp/"+uuid.New().String())) } func TestIsNotExist(t *testing.T) { require.False(t, IsNotExist("/tmp")) require.True(t, IsNotExist("/tmp/"+uuid.New().String())) } func TestIsExecBinary(t *testing.T) { require.False(t, IsExecBinary("/tmp")) e := path.Join(currentDir(), "testdata", "ssh-exec") f, err := os.OpenFile(e, os.O_CREATE, 0o777) require.NoError(t, err) defer f.Close() require.True(t, IsExecBinary(e)) e = path.Join(currentDir(), "testdata", "nop-nop") f, err = os.OpenFile(e, os.O_CREATE, 0o666) require.NoError(t, err) defer f.Close() require.False(t, IsExecBinary(e)) } func TestUntar(t *testing.T) { require.True(t, IsNotExist(path.Join(currentDir(), "testdata", "parent"))) f, err := os.Open(path.Join(currentDir(), "testdata", "test.tar.gz")) require.NoError(t, err) defer f.Close() err = Untar(f, path.Join(currentDir(), "testdata")) require.NoError(t, err) require.True(t, 
IsExist(path.Join(currentDir(), "testdata", "parent", "child", "content"))) } func TestCopy(t *testing.T) { require.Error(t, Copy(path.Join(currentDir(), "testdata", "test.tar.gz"), "/tmp/not-exists/test.tar.gz")) require.NoError(t, Copy(path.Join(currentDir(), "testdata", "test.tar.gz"), "/tmp/test.tar.gz")) fi, err := os.Stat(path.Join(currentDir(), "testdata", "test.tar.gz")) require.NoError(t, err) fii, err := os.Stat("/tmp/test.tar.gz") require.NoError(t, err) require.Equal(t, fi.Mode(), fii.Mode()) require.NoError(t, os.Chmod("/tmp/test.tar.gz", 0o777)) require.NoError(t, Copy(path.Join(currentDir(), "testdata", "test.tar.gz"), "/tmp/test.tar.gz")) fi, err = os.Stat(path.Join(currentDir(), "testdata", "test.tar.gz")) require.NoError(t, err) fii, err = os.Stat("/tmp/test.tar.gz") require.NoError(t, err) require.Equal(t, fi.Mode(), fii.Mode()) } func TestIsSubDir(t *testing.T) { paths := [][]string{ {"a", "a"}, {"../a", "../a/b"}, {"a", "a/b"}, {"/a", "/a/b"}, } for _, p := range paths { require.True(t, IsSubDir(p[0], p[1])) } paths = [][]string{ {"/a", "a/b"}, {"/a/b/c", "/a/b"}, {"/a/b", "/a/b1"}, } for _, p := range paths { require.False(t, IsSubDir(p[0], p[1])) } } func TestSaveFileWithBackup(t *testing.T) { dir := t.TempDir() name := "meta.yaml" for i := range 10 { err := SaveFileWithBackup(filepath.Join(dir, name), []byte(strconv.Itoa(i)), "") require.NoError(t, err) } // Verify the saved files. 
var paths []string err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if strings.Contains(path, "meta") { paths = append(paths, path) } return nil }) require.NoError(t, err) require.Equal(t, 10, len(paths)) sort.Strings(paths) for i, path := range paths { data, err := os.ReadFile(path) require.NoError(t, err) require.Equal(t, strconv.Itoa(i), string(data)) } // test with specify backup dir dir = t.TempDir() backupDir := t.TempDir() for i := range 10 { err := SaveFileWithBackup(filepath.Join(dir, name), []byte(strconv.Itoa(i)), backupDir) require.NoError(t, err) } // Verify the saved files in backupDir. paths = nil err = filepath.Walk(backupDir, func(path string, info os.FileInfo, err error) error { if strings.Contains(path, "meta") { paths = append(paths, path) } return nil }) require.NoError(t, err) require.Equal(t, 9, len(paths)) sort.Strings(paths) for i, path := range paths { data, err := os.ReadFile(path) require.NoError(t, err) require.Equal(t, strconv.Itoa(i), string(data)) } // Verify the latest saved file. data, err := os.ReadFile(filepath.Join(dir, name)) require.NoError(t, err) require.Equal(t, "9", string(data)) } func TestConcurrentSaveFileWithBackup(t *testing.T) { dir := t.TempDir() name := "meta.yaml" data := []byte("concurrent-save-file-with-backup") var wg sync.WaitGroup for range 10 { wg.Add(1) go func() { defer wg.Done() time.Sleep(time.Duration(rand.Intn(100)+4) * time.Millisecond) err := SaveFileWithBackup(filepath.Join(dir, name), data, "") require.NoError(t, err) }() } wg.Wait() // Verify the saved files. 
var paths []string err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if strings.Contains(path, "meta") { paths = append(paths, path) } return nil }) require.NoError(t, err) require.Equal(t, 10, len(paths)) for _, path := range paths { body, err := os.ReadFile(path) require.NoError(t, err) require.Equal(t, len(data), len(body)) require.True(t, bytes.Equal(body, data)) } } tiup-1.16.3/pkg/utils/mock/000077500000000000000000000000001505422223000154305ustar00rootroot00000000000000tiup-1.16.3/pkg/utils/mock/mock.go000066400000000000000000000035011505422223000167070ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package mock import ( "path" "reflect" "sync" "github.com/pingcap/failpoint" ) // Finalizer represent the function that clean a mock point type Finalizer func() type mockPoints struct { m map[string]any l sync.Mutex } func (p *mockPoints) set(fpname string, value any) { p.l.Lock() defer p.l.Unlock() p.m[fpname] = value } func (p *mockPoints) get(fpname string) any { p.l.Lock() defer p.l.Unlock() return p.m[fpname] } func (p *mockPoints) clr(fpname string) { p.l.Lock() defer p.l.Unlock() delete(p.m, fpname) } var points = mockPoints{m: make(map[string]any)} // On inject a failpoint func On(fpname string) any { var ret any failpoint.Inject(fpname, func() { ret = points.get(fpname) }) return ret } // With enable failpoint and provide a value func With(fpname string, value any) Finalizer { if err := failpoint.Enable(failpath(fpname), "return(true)"); err != nil { panic(err) } points.set(fpname, value) return func() { if err := Reset(fpname); err != nil { panic(err) } } } // Reset disable failpoint and remove mock value func Reset(fpname string) error { if err := failpoint.Disable(failpath(fpname)); err != nil { return err } points.clr(fpname) return nil } func failpath(fpname string) string { type em struct{} return path.Join(reflect.TypeOf(em{}).PkgPath(), fpname) } tiup-1.16.3/pkg/utils/regexp.go000066400000000000000000000005611505422223000163220ustar00rootroot00000000000000package utils import "regexp" // MatchGroups turns a slice of matched string to a map according to capture group name func MatchGroups(r *regexp.Regexp, str string) map[string]string { matched := r.FindStringSubmatch(str) results := make(map[string]string) names := r.SubexpNames() for i, value := range matched { results[names[i]] = value } return results } tiup-1.16.3/pkg/utils/regexp_test.go000066400000000000000000000010071505422223000173550ustar00rootroot00000000000000package utils import ( "regexp" "testing" "github.com/stretchr/testify/require" ) func TestMatchGroups(t *testing.T) { cases := 
[]struct { re string str string expected map[string]string }{ { re: `^(?P[a-zA-Z]*)(?P[0-9]*)$`, str: "abc123", expected: map[string]string{ "": "abc123", "first": "abc", "second": "123", }, }, } for _, cas := range cases { require.Equal(t, cas.expected, MatchGroups(regexp.MustCompile(cas.re), cas.str)) } } tiup-1.16.3/pkg/utils/retry.go000066400000000000000000000051631505422223000162000ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "fmt" "strings" "time" ) // RetryUntil when the when func returns true func RetryUntil(f func() error, when func(error) bool) error { e := f() if e == nil { return nil } if when == nil { return RetryUntil(f, nil) } else if when(e) { return RetryUntil(f, when) } return e } // RetryOption is options for Retry() type RetryOption struct { Attempts int64 Delay time.Duration Timeout time.Duration } // default values for RetryOption var ( defaultAttempts int64 = 20 defaultDelay = time.Millisecond * 500 // 500ms defaultTimeout = time.Second * 10 // 10s ) // Retry retries the func until it returns no error or reaches attempts limit or // timed out, either one is earlier func Retry(doFunc func() error, opts ...RetryOption) error { var cfg RetryOption if len(opts) > 0 { cfg = opts[0] } else { cfg = RetryOption{ Attempts: defaultAttempts, Delay: defaultDelay, Timeout: defaultTimeout, } } // timeout must be greater than 0 if cfg.Timeout <= 0 { return fmt.Errorf("timeout (%s) must be greater than 0", cfg.Timeout) } // set options automatically 
for invalid value if cfg.Delay <= 0 { cfg.Delay = defaultDelay } if cfg.Attempts <= 0 { cfg.Attempts = cfg.Timeout.Milliseconds()/cfg.Delay.Milliseconds() + 1 } timeoutChan := time.After(cfg.Timeout) // call the function var attemptCount int64 var err error for attemptCount = 0; attemptCount < cfg.Attempts; attemptCount++ { if err = doFunc(); err == nil { return nil } // check for timeout select { case <-timeoutChan: return fmt.Errorf("operation timed out after %s", cfg.Timeout) default: time.Sleep(cfg.Delay) } } return fmt.Errorf("operation exceeds the max retry attempts of %d. error of last attempt: %s", cfg.Attempts, err) } // IsTimeoutOrMaxRetry return true if it's timeout or reach max retry. func IsTimeoutOrMaxRetry(err error) bool { if err == nil { return false } s := err.Error() if strings.Contains(s, "operation timed out after") || strings.Contains(s, "operation exceeds the max retry attempts of") { return true } return false } tiup-1.16.3/pkg/utils/semver.go000066400000000000000000000143551505422223000163370ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package utils import ( "fmt" "regexp" "strconv" "strings" "golang.org/x/mod/semver" "slices" "github.com/pingcap/errors" ) // NightlyVersionAlias represents latest build of master branch. const NightlyVersionAlias = "nightly" // LatestVersionAlias represents the latest build (excluding nightly versions). 
const LatestVersionAlias = "latest" // FmtVer converts a version string to SemVer format, if the string is not a valid // SemVer and fails to parse and convert it, an error is raised. func FmtVer(ver string) (string, error) { v := ver // nightly version is an alias if strings.ToLower(v) == NightlyVersionAlias { return v, nil } // latest version is an alias if strings.ToLower(v) == LatestVersionAlias { return v, nil } if !strings.HasPrefix(ver, "v") { v = fmt.Sprintf("v%s", ver) } if !semver.IsValid(v) { return v, fmt.Errorf("version %s is not a valid SemVer string", ver) } return v, nil } type ( // Version represents a version string, like: v3.1.2 Version string ) // IsValid checks whether is the version string valid func (v Version) IsValid() bool { return v != "" && semver.IsValid(string(v)) } // IsEmpty returns true if the `Version` is a empty string func (v Version) IsEmpty() bool { return v == "" } // IsNightly returns true if the version is nightly func (v Version) IsNightly() bool { return strings.Contains(string(v), NightlyVersionAlias) } // String implements the fmt.Stringer interface func (v Version) String() string { return string(v) } type ver struct { Major, Minor, Patch int Prerelease []string } func (v *ver) Compare(other *ver) int { if d := compareSegment(v.Major, other.Major); d != 0 { return d } if d := compareSegment(v.Minor, other.Minor); d != 0 { return d } if d := compareSegment(v.Patch, other.Patch); d != 0 { return d } return comparePrerelease(v.Prerelease, other.Prerelease) } // Constraint for semver type Constraint struct { // [min, max) min ver // min ver max ver // max ver } var ( constraintRegex = regexp.MustCompile( `^(?P[~^])?v?(?P0|[1-9]\d*)(?:\.(?P0|[1-9]\d*|[x*]))?(?:\.(?P0|[1-9]\d*|[x*]))?(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`, ) versionRegexp = regexp.MustCompile( 
`^v?(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)\.(?P0|[1-9]\d*)(?:-(?P(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`, ) ) // NewConstraint creates a constraint to check whether a semver is valid. // Only support ^ and ~ and x|X|* func NewConstraint(raw string) (*Constraint, error) { result := MatchGroups(constraintRegex, strings.ToLower(strings.TrimSpace(raw))) if len(result) == 0 { return nil, errors.New("fail to parse version constraint") } c := &Constraint{} defer func() { if len(c.max.Prerelease) == 0 { c.max.Prerelease = []string{"0"} } }() c.min.Major = MustAtoI(result["major"]) c.max.Major = c.min.Major if minor := result["minor"]; minor == "x" || minor == "*" { c.max.Major = c.min.Major + 1 return c, nil } else if minor != "" { c.min.Minor = MustAtoI(result["minor"]) c.max.Minor = c.min.Minor } if patch := result["patch"]; patch == "x" || patch == "*" { c.max.Minor = c.min.Minor + 1 return c, nil } else if patch != "" { c.min.Patch = MustAtoI(result["patch"]) c.max.Patch = c.min.Patch } if prerelease := result["prerelease"]; prerelease != "" { c.min.Prerelease = strings.Split(prerelease, ".") } else { c.min.Prerelease = []string{} } c.max.Prerelease = slices.Clone(c.min.Prerelease) if constraint := result["constraint"]; constraint == "~" { // ~x.y.z -> >=x.y.z =0.0.z c.max.Patch++ } else { // ^0.y.z -> >=0.y.z <0.(y+1).0 c.max.Minor++ c.max.Patch = 0 } } else { // ^x.y.z -> >=x.y.z <(x+1).0.0 c.max.Major++ c.max.Minor = 0 c.max.Patch = 0 } } else if l := len(c.max.Prerelease); l > 0 { c.max.Prerelease[l-1] += " " } else { c.max.Patch++ } return c, nil } // Check checks whether a version is satisfies the constraint func (c *Constraint) Check(v string) bool { result := MatchGroups(versionRegexp, strings.ToLower(strings.TrimSpace(v))) if len(result) == 0 { return false } major := MustAtoI(result["major"]) minor := MustAtoI(result["minor"]) patch := MustAtoI(result["patch"]) version := 
&ver{ Major: major, Minor: minor, Patch: patch, Prerelease: []string{}, } if pre := result["prerelease"]; pre != "" { version.Prerelease = strings.Split(pre, ".") } return c.min.Compare(version) <= 0 && c.max.Compare(version) > 0 } func compareSegment(v, o int) int { if v < o { return -1 } if v > o { return 1 } return 0 } // 1, -1, 0 means A>B, A len(preA) { return -1 } return 0 } func compareAlphaNum(a, b string) int { if a == b { return 0 } iA, errA := strconv.Atoi(a) iB, errB := strconv.Atoi(b) if errA != nil && errB != nil { if a > b { return 1 } return -1 } // Numeric identifiers always have lower precedence than non-numeric identifiers. if errA != nil { return 1 } if errB != nil { return -1 } if iA > iB { return 1 } return -1 } tiup-1.16.3/pkg/utils/semver_test.go000066400000000000000000000042101505422223000173630ustar00rootroot00000000000000package utils import ( "testing" "github.com/stretchr/testify/require" ) func TestSemverc(t *testing.T) { cases := [][]any{ {"v0.0.1", "v0.0.1", true}, {"0.0.1", "v0.0.1", true}, {"invalid", "vinvalid", false}, {"", "v", false}, {"nightly", "nightly", true}, {"Nightly", "Nightly", true}, } for _, cas := range cases { v, e := FmtVer(cas[0].(string)) require.Equal(t, cas[1].(string), v) require.Equal(t, cas[2].(bool), e == nil) } } func TestVersion(t *testing.T) { require.False(t, Version("").IsValid()) require.False(t, Version("v3.0.").IsValid()) require.True(t, Version("").IsEmpty()) require.False(t, Version("").IsNightly()) require.True(t, Version("nightly").IsNightly()) require.Equal(t, "v1.2.3", Version("v1.2.3").String()) } func TestConstraint(t *testing.T) { cases := []struct { constraint string version string match bool }{ {"^4", "4.1.0", true}, {"4", "4.0.0", true}, {"4.0", "4.0.0", true}, {"~4.0", "4.0.5", true}, {"4.1.x", "4.1.0", true}, {"4.1.x", "4.1.5", true}, {"4.x.0", "4.5.0", true}, {"4.x.0", "4.5.2", true}, {"4.x.x", "4.5.2", true}, {"4.3.2-0", "4.3.2", false}, {"^1.1.0", "1.1.1", true}, {"~1.1.0", 
"1.1.1", true}, {"~1.1.0", "1.2.0", false}, {"^1.x.x", "1.1.1", true}, {"^2.x.x", "1.1.1", false}, {"^1.x.x", "2.1.1", false}, {"^1.x.x", "1.1.1-beta1", true}, {"^1.1.2-alpha", "1.2.1-beta.1", true}, {"^1.2.x", "1.2.1-beta.1", true}, {"~1.1.1-beta", "1.1.1-alpha", false}, {"~1.1.1-beta", "1.1.1-beta.1", true}, {"~1.1.1-beta", "1.1.1", true}, {"~1.2.3", "1.2.5", true}, {"~1.2.3", "1.2.2", false}, {"~1.2.3", "1.3.2", false}, {"~1.1.*", "1.2.3", false}, {"~1.3.0", "2.4.5", false}, {"^4.0", "5.0.0-rc", false}, {"^4.0-rc", "5.0.0-rc", false}, {"4.0.0-rc", "4.0.0-rc", true}, {"~4.0.0-rc", "4.0.0-rc.1", true}, {"^4", "v5.0.0-20210408", false}, {"^4.*.*", "5.0.0-0", false}, {"5.*.*", "5.0.0-0", false}, {"^4.0.0-1", "4.0.0-1", true}, {"4.0.0-1", "4.0.0-1", true}, } for _, cas := range cases { cons, err := NewConstraint(cas.constraint) require.NoError(t, err) require.Equal(t, cas.match, cons.Check(cas.version)) } } tiup-1.16.3/pkg/utils/sha.go000066400000000000000000000031451505422223000156040ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package utils import ( "crypto/sha256" "crypto/sha512" "encoding/hex" "io" "strings" "github.com/pingcap/errors" ) // CheckSHA256 returns an error if the hash of reader mismatches `sha` func CheckSHA256(reader io.Reader, sha string) error { shaWriter := sha256.New() if _, err := io.Copy(shaWriter, reader); err != nil { return errors.Trace(err) } checksum := hex.EncodeToString(shaWriter.Sum(nil)) if checksum != strings.TrimSpace(sha) { return &HashValidationErr{ cipher: "sha256", expect: sha, actual: checksum, } } return nil } // SHA256 returns the hash of reader func SHA256(reader io.Reader) (string, error) { shaWriter := sha256.New() if _, err := io.Copy(shaWriter, reader); err != nil { return "", errors.Trace(err) } checksum := hex.EncodeToString(shaWriter.Sum(nil)) return checksum, nil } // SHA512 returns the hash of reader func SHA512(reader io.Reader) (string, error) { shaWriter := sha512.New() if _, err := io.Copy(shaWriter, reader); err != nil { return "", errors.Trace(err) } checksum := hex.EncodeToString(shaWriter.Sum(nil)) return checksum, nil } tiup-1.16.3/pkg/utils/tabledisplayer.go000066400000000000000000000022611505422223000200330ustar00rootroot00000000000000package utils import ( "fmt" "io" "strings" ) // TableDisplayer is a simple table displayer type TableDisplayer struct { Header []string Rows [][]string Writer io.Writer } // NewTableDisplayer creates a new TableDisplayer func NewTableDisplayer(w io.Writer, header []string) *TableDisplayer { return &TableDisplayer{ Writer: w, Header: header, } } // AddRow adds a row to the table func (t *TableDisplayer) AddRow(row ...string) { // cut items if row is longer than header if len(row) > len(t.Header) { row = row[:len(t.Header)] } t.Rows = append(t.Rows, row) } // Display the table func (t *TableDisplayer) Display() { lens := make([]int, len(t.Header)) for i, h := range t.Header { lens[i] = len(h) } for _, row := range t.Rows { for i, r := range row { if len(r) > lens[i] { lens[i] = len(r) } } } outputs 
:= [][]string{t.Header, {}} for _, item := range t.Header { outputs[1] = append(outputs[1], strings.Repeat("-", len(item))) } outputs = append(outputs, t.Rows...) for _, row := range outputs { for i, r := range row[:len(row)-1] { fmt.Fprintf(t.Writer, "%-*s", lens[i]+2, r) } fmt.Fprintf(t.Writer, "%s\n", row[len(row)-1]) } } tiup-1.16.3/pkg/utils/testdata/000077500000000000000000000000001505422223000163105ustar00rootroot00000000000000tiup-1.16.3/pkg/utils/testdata/test.tar.gz000066400000000000000000000002661505422223000204220ustar00rootroot00000000000000r6g^ҍ 0a/ewЙۙ#X?J  aplڼ;.YdVT%S낳qdNl]y[95}5eefQ~Wg.CESq=zҍ<`.R:(tiup-1.16.3/pkg/utils/user.go000066400000000000000000000010131505422223000157770ustar00rootroot00000000000000package utils import ( "os/user" logprinter "github.com/pingcap/tiup/pkg/logger/printer" ) // CurrentUser returns current login user func CurrentUser() string { user, err := user.Current() if err != nil { logprinter.Errorf("Get current user: %s", err) return "root" } return user.Username } // UserHome returns home directory of current user func UserHome() string { user, err := user.Current() if err != nil { logprinter.Errorf("Get current user home: %s", err) return "root" } return user.HomeDir } tiup-1.16.3/pkg/utils/utils.go000066400000000000000000000040651505422223000161730ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package utils import ( "math" "net" "strconv" "strings" "time" "github.com/spf13/pflag" ) // JoinInt joins a slice of int to string func JoinInt(nums []int, delim string) string { result := "" for _, i := range nums { result += strconv.Itoa(i) result += delim } return strings.TrimSuffix(result, delim) } // IsFlagSetByUser check if the a flag is set by user explicitly func IsFlagSetByUser(flagSet *pflag.FlagSet, flagName string) bool { setByUser := false flagSet.Visit(func(f *pflag.Flag) { if f.Name == flagName { setByUser = true } }) return setByUser } // MustAtoI calls strconv.Atoi and ignores error func MustAtoI(a string) int { v, _ := strconv.Atoi(a) return v } // Base62Tag returns a tag based on time func Base62Tag() string { const base = 62 const sets = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" b := make([]byte, 0) num := time.Now().UnixNano() / int64(time.Millisecond) for num > 0 { r := math.Mod(float64(num), float64(base)) num /= base b = append([]byte{sets[int(r)]}, b...) } return string(b) } // Ternary operator func Ternary(condition bool, a, b any) any { if condition { return a } return b } // JoinHostPort return host and port func JoinHostPort(host string, port int) string { return net.JoinHostPort(host, strconv.Itoa(port)) } // ParseHostPort Prase host and port func ParseHostPort(hostport string) (host, port string) { colon := strings.LastIndex(hostport, ":") host = strings.TrimSuffix(strings.TrimPrefix(hostport[:colon], "["), "]") port = hostport[colon+1:] return } tiup-1.16.3/pkg/version/000077500000000000000000000000001505422223000150245ustar00rootroot00000000000000tiup-1.16.3/pkg/version/types.go000066400000000000000000000034561505422223000165270ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package version import ( "fmt" "runtime" ) // TiUPVersion is the semver of TiUP type TiUPVersion struct { major int minor int patch int name string } // NewTiUPVersion creates a TiUPVersion object func NewTiUPVersion() *TiUPVersion { return &TiUPVersion{ major: TiUPVerMajor, minor: TiUPVerMinor, patch: TiUPVerPatch, name: TiUPVerName, } } // Name returns the alternave name of TiUPVersion func (v *TiUPVersion) Name() string { return v.name } // SemVer returns TiUPVersion in semver format func (v *TiUPVersion) SemVer() string { return fmt.Sprintf("%d.%d.%d", v.major, v.minor, v.patch) } // String converts TiUPVersion to a string func (v *TiUPVersion) String() string { return fmt.Sprintf("%s %s\n%s", v.SemVer(), v.name, NewTiUPBuildInfo()) } // TiUPBuild is the info of building environment type TiUPBuild struct { GitHash string `json:"gitHash"` GitRef string `json:"gitRef"` GoVersion string `json:"goVersion"` } // NewTiUPBuildInfo creates a TiUPBuild object func NewTiUPBuildInfo() *TiUPBuild { return &TiUPBuild{ GitHash: GitHash, GitRef: GitRef, GoVersion: runtime.Version(), } } // String converts TiUPBuild to a string func (v *TiUPBuild) String() string { return fmt.Sprintf("Go Version: %s\nGit Ref: %s\nGitHash: %s", v.GoVersion, v.GitRef, v.GitHash) } tiup-1.16.3/pkg/version/version.go000066400000000000000000000021511505422223000170370ustar00rootroot00000000000000// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. // This file only contains version related variables and consts, all // type definitions and functions shall not be implemented here. // This file is excluded from CI tests. package version var ( // TiUPVerMajor is the major version of TiUP TiUPVerMajor = 1 // TiUPVerMinor is the minor version of TiUP TiUPVerMinor = 16 // TiUPVerPatch is the patch version of TiUP TiUPVerPatch = 3 // TiUPVerName is an alternative name of the version TiUPVerName = "tiup" // GitHash is the current git commit hash GitHash = "Unknown" // GitRef is the current git reference name (branch or tag) GitRef = "Unknown" ) tiup-1.16.3/server/000077500000000000000000000000001505422223000140645ustar00rootroot00000000000000tiup-1.16.3/server/handler/000077500000000000000000000000001505422223000155015ustar00rootroot00000000000000tiup-1.16.3/server/handler/component.go000066400000000000000000000062241505422223000200360ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package handler import ( "encoding/json" "net/http" "slices" "github.com/gorilla/mux" "github.com/pingcap/fn" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/repository/model" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/server/session" ) // SignComponent handles requests to re-sign component manifest func SignComponent(sm session.Manager, mirror repository.Mirror) http.Handler { return &componentSigner{sm, mirror} } type componentSigner struct { sm session.Manager mirror repository.Mirror } func (h *componentSigner) ServeHTTP(w http.ResponseWriter, r *http.Request) { fn.Wrap(h.sign).ServeHTTP(w, r) } func buildInfo(r *http.Request, sid string) *model.PublishInfo { info := &model.PublishInfo{} m := map[string]**bool{ repository.OptionYanked: &info.Yank, repository.OptionStandalone: &info.Stand, repository.OptionHidden: &info.Hide, } for k, v := range m { f := false if query(r, k) == "true" { f = true *v = &f } else if query(r, k) == "false" { *v = &f } } return info } func (h *componentSigner) sign(r *http.Request, m *v1manifest.RawManifest) (sr *simpleResponse, err statusError) { sid := mux.Vars(r)["sid"] name := mux.Vars(r)["name"] info := buildInfo(r, sid) blackList := []string{"root", "index", "snapshot", "timestamp"} if slices.Contains(blackList, name) { return nil, ErrorForbiden } logprinter.Infof("Sign component manifest for %s, sid: %s", name, sid) fileName, readCloser, rErr := h.sm.Read(sid) if rErr != nil { logprinter.Errorf("Read tar info for component %s, sid: %s", name, sid) return nil, ErrorInternalError } info.ComponentData = &model.TarInfo{ Reader: readCloser, Name: fileName, } comp := v1manifest.Component{} if err := json.Unmarshal(m.Signed, &comp); err != nil { logprinter.Errorf("Unmarshal manifest %s", err.Error()) return nil, ErrorInvalidManifest } manifest := &v1manifest.Manifest{ Signatures: m.Signatures, Signed: &comp, } switch err := 
h.mirror.Publish(manifest, info); err { case model.ErrorConflict: return nil, ErrorManifestConflict case model.ErrorWrongSignature: return nil, ErrorForbiden case model.ErrorWrongChecksum, model.ErrorWrongFileName: logprinter.Errorf("Publish component: %s", err.Error()) return nil, ErrorInvalidTarball case nil: return nil, nil default: h.sm.Delete(sid) logprinter.Errorf("Publish component: %s", err.Error()) return nil, ErrorInternalError } } func query(r *http.Request, q string) string { qs := r.URL.Query()[q] if len(qs) == 0 { return "" } return qs[0] } tiup-1.16.3/server/handler/error.go000066400000000000000000000045401505422223000171640ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package handler import "net/http" type statusError interface { StatusCode() int Status() string Error() string } type handlerError struct { code int status string message string } func newHandlerError(code int, status, message string) statusError { return &handlerError{ code: code, status: status, message: message, } } func (e *handlerError) StatusCode() int { return e.code } func (e *handlerError) Status() string { return e.status } func (e *handlerError) Error() string { return e.message } var ( // ErrorSessionMissing indicates that the specified session not found ErrorSessionMissing = newHandlerError(http.StatusNotFound, "SESSION NOT FOUND", "session with specified identity not found") // ErrorManifestMissing indicates that the specified component doesn't have manifest yet ErrorManifestMissing = newHandlerError(http.StatusNotFound, "MANIFEST NOT FOUND", "that component doesn't have manifest yet") // ErrorInvalidTarball indicates that the tarball is not valid (eg. too large) ErrorInvalidTarball = newHandlerError(http.StatusBadRequest, "INVALID TARBALL", "the tarball content is not valid") // ErrorInvalidManifest indicates that the manifest is not valid ErrorInvalidManifest = newHandlerError(http.StatusBadRequest, "INVALID MANIFEST", "the manifest content is not valid") // ErrorInternalError indicates that an internal error happened ErrorInternalError = newHandlerError(http.StatusInternalServerError, "INTERNAL ERROR", "an internal error happened") // ErrorManifestConflict indicates that the uploaded manifest is not new enough ErrorManifestConflict = newHandlerError(http.StatusConflict, "MANIFEST CONFLICT", "the manifest provided is not new enough") // ErrorForbiden indicates that the user can't access target resource ErrorForbiden = newHandlerError(http.StatusForbidden, "FORBIDDEN", "permission denied") ) tiup-1.16.3/server/handler/handler.go000066400000000000000000000023741505422223000174530ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package handler import ( "context" "github.com/pingcap/fn" logprinter "github.com/pingcap/tiup/pkg/logger/printer" ) // errorMessage is used for error response type errorMessage struct { Status string `json:"status"` Message string `json:"message"` } func init() { fn.SetErrorEncoder(func(ctx context.Context, err error) any { logprinter.Debugf("Response an error message to client") if e, ok := err.(statusError); ok { logprinter.Debugf("Response status error to client: %s", e.Error()) return &errorMessage{ Status: e.Status(), Message: e.Error(), } } logprinter.Debugf("Unknow error occurred: %s", err.Error()) return &errorMessage{ Status: "UNKNOWN_ERROR", Message: "make sure your request is valid", } }) } tiup-1.16.3/server/handler/rotate.go000066400000000000000000000032651505422223000173340ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package handler import ( "encoding/json" "net/http" "github.com/pingcap/fn" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/pkg/repository/model" "github.com/pingcap/tiup/pkg/repository/v1manifest" ) // RotateRoot handles requests to re-sign root manifest func RotateRoot(mirror repository.Mirror) http.Handler { return &rootSigner{mirror} } type rootSigner struct { mirror repository.Mirror } func (h *rootSigner) ServeHTTP(w http.ResponseWriter, r *http.Request) { fn.Wrap(h.sign).ServeHTTP(w, r) } func (h *rootSigner) sign(m *v1manifest.RawManifest) (sr *simpleResponse, err statusError) { root := v1manifest.Root{} if err := json.Unmarshal(m.Signed, &root); err != nil { logprinter.Errorf("Unmarshal manifest %s", err.Error()) return nil, ErrorInvalidManifest } manifest := &v1manifest.Manifest{ Signatures: m.Signatures, Signed: &root, } switch err := h.mirror.Rotate(manifest); err { case model.ErrorConflict: return nil, ErrorManifestConflict case nil: return nil, nil default: logprinter.Errorf("Rotate root manifest: %s", err.Error()) return nil, ErrorInternalError } } tiup-1.16.3/server/handler/tarball.go000066400000000000000000000031451505422223000174540ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package handler import ( "net/http" "github.com/gorilla/mux" "github.com/pingcap/fn" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/server/session" ) // MaxMemory is the a total of max bytes of its file parts stored in memory const MaxMemory = 32 * 1024 * 1024 // UploadTarbal handle tarball upload func UploadTarbal(sm session.Manager) http.Handler { return &tarballUploader{sm} } type tarballUploader struct { sm session.Manager } func (h *tarballUploader) ServeHTTP(w http.ResponseWriter, r *http.Request) { fn.Wrap(h.upload).ServeHTTP(w, r) } func (h *tarballUploader) upload(r *http.Request) (*simpleResponse, statusError) { sid := mux.Vars(r)["sid"] logprinter.Infof("Uploading tarball, sid: %s", sid) file, handler, err := r.FormFile("file") if err != nil { // TODO: log error here return nil, ErrorInvalidTarball } defer file.Close() defer r.MultipartForm.RemoveAll() if err := h.sm.Write(sid, handler.Filename, file); err != nil { logprinter.Errorf("Error to write tarball: %s", err.Error()) return nil, ErrorInternalError } return nil, nil } tiup-1.16.3/server/handler/types.go000066400000000000000000000011641505422223000171760ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package handler type simpleResponse struct { Status string `json:"status"` Message string `json:"message"` } tiup-1.16.3/server/main.go000066400000000000000000000030031505422223000153330ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "os" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/version" "github.com/spf13/cobra" ) func main() { addr := "0.0.0.0:8989" keyDir := "" upstream := "https://tiup-mirrors.pingcap.com" cmd := &cobra.Command{ Use: fmt.Sprintf("%s ", os.Args[0]), Short: "bootstrap a mirror server", Version: version.NewTiUPVersion().String(), RunE: func(cmd *cobra.Command, args []string) error { if len(args) != 1 { return cmd.Help() } s, err := newServer(args[0], keyDir, upstream) if err != nil { return err } return s.run(addr) }, } cmd.Flags().StringVarP(&addr, "addr", "", addr, "addr to listen") cmd.Flags().StringVarP(&keyDir, "key-dir", "", keyDir, "specify the directory where stores the private keys") cmd.Flags().StringVarP(&upstream, "upstream", "", upstream, "specify the upstream mirror") if err := cmd.Execute(); err != nil { logprinter.Errorf("Execute command: %s", err.Error()) } } tiup-1.16.3/server/package/000077500000000000000000000000001505422223000154575ustar00rootroot00000000000000tiup-1.16.3/server/package/package.go000066400000000000000000000061431505422223000174050ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "os" "os/exec" "runtime" "github.com/pingcap/tiup/pkg/utils" "github.com/spf13/cobra" ) func main() { if err := execute(); err != nil { fmt.Println("Packaging component failed:", err) os.Exit(1) } } type packageOptions struct { goos string goarch string dir string name string version string entry string desc string standalone bool hide bool } func execute() error { options := packageOptions{} rootCmd := &cobra.Command{ Use: "tiup package target", Short: "Package a tiup component and generate package directory", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) < 1 { return cmd.Help() } return pack(args, options) }, } // some arguments are not used anymore, we keep them to make it compatible // with legacy CI jobs rootCmd.Flags().StringVar(&options.goos, "os", runtime.GOOS, "Target OS of the package") rootCmd.Flags().StringVar(&options.goarch, "arch", runtime.GOARCH, "Target ARCH of the package") rootCmd.Flags().StringVarP(&options.dir, "", "C", "", "Change directory before compress") rootCmd.Flags().StringVar(&options.name, "name", "", "Name of the package (required)") rootCmd.Flags().StringVar(&options.version, "release", "", "Version of the package (required)") rootCmd.Flags().StringVar(&options.entry, "entry", "", "(deprecated) Entry point of the package") rootCmd.Flags().StringVar(&options.desc, "desc", "", "(deprecated) Description of the package") rootCmd.Flags().BoolVar(&options.standalone, "standalone", false, "(deprecated) Can the component run standalone") rootCmd.Flags().BoolVar(&options.hide, "hide", false, "(deprecated) Don't show the component in `tiup 
list`") _ = rootCmd.MarkFlagRequired("name") _ = rootCmd.MarkFlagRequired("release") return rootCmd.Execute() } func pack(targets []string, options packageOptions) error { if err := utils.MkdirAll("package", 0755); err != nil { return err } // tar -czf package/{name}-{version}-{goos}-{goarch}.tar.gz target return packTarget(targets, options) } func packTarget(targets []string, options packageOptions) error { file := fmt.Sprintf("package/%s-%s-%s-%s.tar.gz", options.name, options.version, options.goos, options.goarch) args := []string{"-czf", file} if options.dir != "" { args = append(args, "-C", options.dir) } cmd := exec.Command("tar", append(args, targets...)...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr fmt.Println(cmd.Args) if err := cmd.Run(); err != nil { return fmt.Errorf("package target: %s", err.Error()) } return nil } tiup-1.16.3/server/rotate/000077500000000000000000000000001505422223000153625ustar00rootroot00000000000000tiup-1.16.3/server/rotate/component.go000066400000000000000000000060231505422223000177140ustar00rootroot00000000000000// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package rotate import ( "context" "fmt" "net/http" cjson "github.com/gibson042/canonicaljson-go" "github.com/gorilla/mux" "github.com/pingcap/errors" "github.com/pingcap/fn" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" ) // ServeComponent starts a temp server for receiving component signatures from owner func ServeComponent(addr string, owner *v1manifest.Owner, comp *v1manifest.Component) (*v1manifest.Manifest, error) { r := mux.NewRouter() uri := fmt.Sprintf("/rotate/%s", utils.Base62Tag()) r.Handle(uri, fn.Wrap(func() (*v1manifest.Manifest, error) { return &v1manifest.Manifest{Signed: comp}, nil })).Methods("GET") sigCh := make(chan v1manifest.Signature) r.Handle(uri, fn.Wrap(func(m *v1manifest.RawManifest) (*v1manifest.Manifest /* always nil */, error) { for _, sig := range m.Signatures { if err := verifyComponentSig(sig, owner, comp); err != nil { return nil, err } sigCh <- sig } return nil, nil })).Methods("POST") srv := &http.Server{Addr: addr, Handler: r} go func() { if err := srv.ListenAndServe(); err != nil { logprinter.Errorf("server closed: %s", err.Error()) } close(sigCh) }() manifest := &v1manifest.Manifest{Signed: comp} status := newStatusRender(owner.Keys, addr, uri) defer status.stop() SIGLOOP: for sig := range sigCh { for _, s := range manifest.Signatures { if s.KeyID == sig.KeyID { // Duplicate signature continue SIGLOOP } } manifest.Signatures = append(manifest.Signatures, sig) status.render(manifest) if len(manifest.Signatures) == len(owner.Keys) { _ = srv.Shutdown(context.Background()) break } } if len(manifest.Signatures) != len(owner.Keys) { return nil, errors.New("no enough signature collected before server shutdown") } return manifest, nil } func verifyComponentSig(sig v1manifest.Signature, owner *v1manifest.Owner, comp *v1manifest.Component) error { payload, err := cjson.Marshal(comp) if err != nil { return 
fn.ErrorWithStatusCode(errors.Annotate(err, "marshal component manifest"), http.StatusInternalServerError) } k := owner.Keys[sig.KeyID] if k == nil { // Received a signature signed by an invalid key return fn.ErrorWithStatusCode(errors.New("the key is not valid"), http.StatusNotAcceptable) } if err := k.Verify(payload, sig.Sig); err != nil { // Received an invalid signature return fn.ErrorWithStatusCode(errors.New("the signature is not valid"), http.StatusNotAcceptable) } return nil } tiup-1.16.3/server/rotate/root.go000066400000000000000000000061031505422223000166740ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package rotate import ( "context" "fmt" "net/http" cjson "github.com/gibson042/canonicaljson-go" "github.com/gorilla/mux" "github.com/pingcap/errors" "github.com/pingcap/fn" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/utils" ) // ServeRoot starts a temp server for receiving root signatures from administrators func ServeRoot(addr string, root *v1manifest.Root) (*v1manifest.Manifest, error) { r := mux.NewRouter() uri := fmt.Sprintf("/rotate/%s", utils.Base62Tag()) r.Handle(uri, fn.Wrap(func() (*v1manifest.Manifest, error) { return &v1manifest.Manifest{Signed: root}, nil })).Methods("GET") sigCh := make(chan v1manifest.Signature) r.Handle(uri, fn.Wrap(func(m *v1manifest.RawManifest) (*v1manifest.Manifest /* always nil */, error) { for _, sig := range m.Signatures { if err := verifyRootSig(sig, root); err != nil { return nil, err } sigCh <- sig } return nil, nil })).Methods("POST") srv := &http.Server{Addr: addr, Handler: r} go func() { if err := srv.ListenAndServe(); err != nil { logprinter.Errorf("server closed: %s", err.Error()) } close(sigCh) }() manifest := &v1manifest.Manifest{Signed: root} status := newStatusRender(root.Roles[v1manifest.ManifestTypeRoot].Keys, addr, uri) defer status.stop() SIGLOOP: for sig := range sigCh { for _, s := range manifest.Signatures { if s.KeyID == sig.KeyID { // Duplicate signature continue SIGLOOP } } manifest.Signatures = append(manifest.Signatures, sig) status.render(manifest) if len(manifest.Signatures) == len(root.Roles[v1manifest.ManifestTypeRoot].Keys) { _ = srv.Shutdown(context.Background()) break } } if len(manifest.Signatures) != len(root.Roles[v1manifest.ManifestTypeRoot].Keys) { return nil, errors.New("no enough signature collected before server shutdown") } return manifest, nil } func verifyRootSig(sig v1manifest.Signature, root *v1manifest.Root) error { payload, err := cjson.Marshal(root) if err != nil { return 
fn.ErrorWithStatusCode(errors.Annotate(err, "marshal root manifest"), http.StatusInternalServerError) } k := root.Roles[v1manifest.ManifestTypeRoot].Keys[sig.KeyID] if k == nil { // Received a signature signed by an invalid key return fn.ErrorWithStatusCode(errors.New("the key is not valid"), http.StatusNotAcceptable) } if err := k.Verify(payload, sig.Sig); err != nil { // Received an invalid signature return fn.ErrorWithStatusCode(errors.New("the signature is not valid"), http.StatusNotAcceptable) } return nil } tiup-1.16.3/server/rotate/server.go000066400000000000000000000034641505422223000172260ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package rotate import ( "fmt" "net" "strings" "github.com/pingcap/tiup/pkg/repository/v1manifest" "github.com/pingcap/tiup/pkg/tui/progress" ) type statusRender struct { mbar *progress.MultiBar bars map[string]*progress.MultiBarItem } func newStatusRender(keys map[string]*v1manifest.KeyInfo, addr, uri string) *statusRender { ss := strings.Split(addr, ":") if strings.Trim(ss[0], " ") == "" || strings.Trim(ss[0], " ") == "0.0.0.0" { addrs, _ := net.InterfaceAddrs() for _, addr := range addrs { if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && ip.IP.To4() != nil { ss[0] = ip.IP.To4().String() break } } } status := &statusRender{ mbar: progress.NewMultiBar(fmt.Sprintf("Waiting all key holders to sign http://%s%s", strings.Join(ss, ":"), uri)), bars: make(map[string]*progress.MultiBarItem), } for key := range keys { status.bars[key] = status.mbar.AddBar(fmt.Sprintf(" - Waiting key %s", key)) } status.mbar.StartRenderLoop() return status } func (s *statusRender) render(manifest *v1manifest.Manifest) { for _, sig := range manifest.Signatures { s.bars[sig.KeyID].UpdateDisplay(&progress.DisplayProps{ Prefix: fmt.Sprintf(" - Waiting key %s", sig.KeyID), Mode: progress.ModeDone, }) } } func (s *statusRender) stop() { s.mbar.StopRenderLoop() } tiup-1.16.3/server/router.go000066400000000000000000000032371505422223000157400ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "net/http" "time" "github.com/gorilla/mux" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/server/handler" ) type traceResponseWriter struct { http.ResponseWriter statusCode int } func (w *traceResponseWriter) WriteHeader(code int) { w.statusCode = code w.ResponseWriter.WriteHeader(code) } func httpRequestMiddleware(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logprinter.Infof("Request : %s - %s - %s", r.RemoteAddr, r.Method, r.URL) start := time.Now() tw := &traceResponseWriter{w, http.StatusOK} h.ServeHTTP(tw, r) logprinter.Infof("Response [%d] : %s - %s - %s (%.3f sec)", tw.statusCode, r.RemoteAddr, r.Method, r.URL, time.Since(start).Seconds()) }) } func (s *server) router() http.Handler { r := mux.NewRouter() r.Handle("/api/v1/tarball/{sid}", handler.UploadTarbal(s.sm)) r.Handle("/api/v1/component/{sid}/{name}", handler.SignComponent(s.sm, s.mirror)) r.Handle("/api/v1/rotate", handler.RotateRoot(s.mirror)) r.PathPrefix("/").Handler(s.static("/", s.mirror.Source(), s.upstream)) return httpRequestMiddleware(r) } tiup-1.16.3/server/server.go000066400000000000000000000023141505422223000157210ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "fmt" "net/http" "github.com/pingcap/tiup/pkg/repository" "github.com/pingcap/tiup/server/session" ) type server struct { mirror repository.Mirror sm session.Manager upstream string } // NewServer returns a pointer to server func newServer(rootDir, keyDir, upstream string) (*server, error) { mirror := repository.NewMirror(rootDir, repository.MirrorOptions{Upstream: upstream, KeyDir: keyDir}) if err := mirror.Open(); err != nil { return nil, err } s := &server{ mirror: mirror, sm: session.New(), upstream: upstream, } return s, nil } func (s *server) run(addr string) error { fmt.Println(addr) return http.ListenAndServe(addr, s.router()) } tiup-1.16.3/server/session/000077500000000000000000000000001505422223000155475ustar00rootroot00000000000000tiup-1.16.3/server/session/session.go000066400000000000000000000064121505422223000175640ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package session import ( "io" "os" "path" "sync" "time" "github.com/pingcap/errors" "github.com/pingcap/tiup/pkg/localdata" logprinter "github.com/pingcap/tiup/pkg/logger/printer" "github.com/pingcap/tiup/pkg/utils" ) // Max alive time of a session const maxAliveTime = 600 * time.Second var ( // ErrorSessionConflict indicates that a same session existed ErrorSessionConflict = errors.New("a session with same identity has been existed") ) // Manager provide methods to operates on upload sessions type Manager interface { Write(id string, name string, reader io.Reader) error Read(id string) (string, io.ReadCloser, error) Delete(id string) } type sessionManager struct { m *sync.Map } // New returns a session manager func New() Manager { return &sessionManager{ m: &sync.Map{}, } } // Write start a new session func (s *sessionManager) Write(id, name string, reader io.Reader) error { if _, ok := s.m.Load(id); ok { return ErrorSessionConflict } logprinter.Debugf("Begin new session: %s", id) s.m.Store(id, name) go s.gc(id) dataDir := os.Getenv(localdata.EnvNameComponentDataDir) if dataDir == "" { return errors.Errorf("cannot read environment variable %s", localdata.EnvNameComponentDataDir) } pkgDir := path.Join(dataDir, "packages") if err := utils.MkdirAll(pkgDir, 0755); err != nil { return errors.Annotate(err, "create package dir") } filePath := path.Join(pkgDir, id+"_"+name) file, err := os.Create(filePath) if err != nil { return errors.Annotate(err, "create tar file") } defer file.Close() if _, err := io.Copy(file, reader); err != nil { return errors.Annotate(err, "write tar file") } return nil } // Read returns the tar file of given session func (s *sessionManager) Read(id string) (string, io.ReadCloser, error) { n, ok := s.m.Load(id) if !ok { return "", nil, nil } name := n.(string) dataDir := os.Getenv(localdata.EnvNameComponentDataDir) if dataDir == "" { return "", nil, errors.Errorf("cannot read environment variable %s", localdata.EnvNameComponentDataDir) } pkgDir := 
path.Join(dataDir, "packages") if err := utils.MkdirAll(pkgDir, 0755); err != nil { return "", nil, errors.Annotate(err, "create package dir") } filePath := path.Join(pkgDir, id+"_"+name) file, err := os.Open(filePath) if err != nil { return "", nil, errors.Annotate(err, "open tar file") } return name, file, nil } // Delete a session func (s *sessionManager) Delete(id string) { logprinter.Debugf("Delete session: %s", id) n, ok := s.m.Load(id) if !ok { return } name := n.(string) os.Remove(path.Join(os.Getenv(localdata.EnvNameComponentDataDir), "packages", id+"_"+name)) s.m.Delete(id) } func (s *sessionManager) gc(id string) { time.Sleep(maxAliveTime) if _, ok := s.m.Load(id); !ok { return } s.Delete(id) } tiup-1.16.3/server/static.go000066400000000000000000000032761505422223000157120ustar00rootroot00000000000000// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "net/http" "net/http/httputil" "net/url" "os" "path" logprinter "github.com/pingcap/tiup/pkg/logger/printer" ) // staticServer start a static web server func staticServer(local string, upstream string) http.Handler { fs := http.Dir(local) fsh := http.FileServer(fs) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if f, err := fs.Open(path.Clean(r.URL.Path)); err == nil { f.Close() } else if os.IsNotExist(err) && upstream != "" { if err := proxyUpstream(w, r, path.Join(local, path.Clean(r.URL.Path)), upstream); err != nil { logprinter.Errorf("Proxy upstream: %s", err.Error()) fsh.ServeHTTP(w, r) } logprinter.Errorf("Handle file: %s", err.Error()) return } fsh.ServeHTTP(w, r) }) } func proxyUpstream(w http.ResponseWriter, r *http.Request, file, upstream string) error { url, err := url.Parse(upstream) if err != nil { return err } r.Host = url.Host r.URL.Host = url.Host r.URL.Scheme = url.Scheme httputil.NewSingleHostReverseProxy(url).ServeHTTP(w, r) return nil } func (s *server) static(prefix, root, upstream string) http.Handler { return http.StripPrefix(prefix, staticServer(root, upstream)) } tiup-1.16.3/tests/000077500000000000000000000000001505422223000137205ustar00rootroot00000000000000tiup-1.16.3/tests/Makefile000066400000000000000000000023071505422223000153620ustar00rootroot00000000000000build_integration_test: build_tiup_playground_test build_tiup_cluster_test build_tiup_dm_test @# Target: run the playground, cluster, and dm integration tests. build_tiup_playground_test: failpoint-enable @# Target: run the tiup-playground tests $(GOTEST) -c -cover -covermode=count \ -coverpkg=github.com/pingcap/tiup/... \ -o tests/tiup-playground/bin/tiup-playground.test \ github.com/pingcap/tiup/components/playground @$(FAILPOINT_DISABLE) build_tiup_cluster_test: failpoint-enable @# Target: run the tiup-cluster tests $(GOTEST) -c -cover -covermode=count \ -coverpkg=github.com/pingcap/tiup/... 
\ -o tests/tiup-cluster/bin/tiup-cluster.test \ github.com/pingcap/tiup/components/cluster; @$(FAILPOINT_DISABLE) build_tiup_dm_test: failpoint-enable @# Target: run the tiup-dm tests $(GOTEST) -c -cover -covermode=count \ -coverpkg=github.com/pingcap/tiup/... \ -o tests/tiup-dm/bin/tiup-dm.test \ github.com/pingcap/tiup/components/dm; @$(FAILPOINT_DISABLE) build_tiup_test: failpoint-enable @# Target: run the tiup tests $(GOTEST) -c -cover -covermode=count \ -coverpkg=github.com/pingcap/tiup/... \ -o tests/tiup/bin/tiup.test \ github.com/pingcap/tiup; @$(FAILPOINT_DISABLE) tiup-1.16.3/tests/expected/000077500000000000000000000000001505422223000155215ustar00rootroot00000000000000tiup-1.16.3/tests/expected/tiup/000077500000000000000000000000001505422223000165025ustar00rootroot00000000000000tiup-1.16.3/tests/expected/tiup/tiup-binary-package.output000066400000000000000000000000721505422223000236170ustar00rootroot00000000000000TIUP_HOME_INTEGRATION_TEST/components/package/v0.0.1/pack tiup-1.16.3/tests/expected/tiup/tiup-clean-all.output000066400000000000000000000003441505422223000225740ustar00rootroot00000000000000Clean instance of `test`, directory: TIUP_HOME_INTEGRATION_TEST/data/test-0 Clean instance of `test`, directory: TIUP_HOME_INTEGRATION_TEST/data/test-1 Clean instance of `test`, directory: TIUP_HOME_INTEGRATION_TEST/data/test-2 tiup-1.16.3/tests/expected/tiup/tiup-h.output000066400000000000000000000053351505422223000212000ustar00rootroot00000000000000TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system. You can run a specific version of a component via "tiup [:version]". If no version number is specified, the latest version installed locally will be used. If the specified component does not have any version installed locally, the latest stable version will be downloaded from the repository. Usage: tiup [flags] [args...] tiup [flags] [args...] 
Available Commands: install Install a specific version of a component list List the available TiDB components or versions uninstall Uninstall components or versions of a component update Update tiup components to the latest version status List the status of instantiated components clean Clean the data of instantiated components telemetry Controls things about telemetry help Help about any command or component Available Components: mirrors doc playground ctl Flags: -B, --binary [:version] Print binary path of a specific version of a component [:version] and the latest version installed will be selected if no version specified --binpath string Specify the binary path of component instance -h, --help help for tiup --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string -T, --tag string Specify a tag for component instance --version version for tiup Component instances with the same "tag" will share a data directory ($TIUP_HOME/data/$tag): $ tiup --tag mycluster playground Examples: $ tiup playground # Quick start $ tiup playground nightly # Start a playground with the latest nightly version $ tiup install [:version] # Install a component of specific version $ tiup update --all # Update all installed components to the latest version $ tiup update --nightly # Update all installed components to the nightly version $ tiup update --self # Update the "tiup" to the latest version $ tiup list # Fetch the latest supported components list $ tiup status # Display all running/terminated instances $ tiup clean # Clean the data of running/terminated instance (Kill process if it's running) $ tiup clean --all # Clean the data of all running/terminated instances Use "tiup [command] --help" for more information about a command. 
tiup-1.16.3/tests/expected/tiup/tiup-help-ctl.output000066400000000000000000000001541505422223000224530ustar00rootroot00000000000000TiDB controllers Usage: tiup ctl {tidb/pd/tikv/binlog/etcd} [flags] Flags: -h, --help help for tiup tiup-1.16.3/tests/expected/tiup/tiup-help-doc.output000066400000000000000000000002601505422223000224340ustar00rootroot00000000000000TiDB document summary page Usage: tiup doc [flags] Flags: -h, --help help for tiup --lang string The language of the documentation: en/cn (default "en") tiup-1.16.3/tests/expected/tiup/tiup-help-install.output000066400000000000000000000011471505422223000233420ustar00rootroot00000000000000Install a specific version of a component. The component can be specified by or :. The latest stable version will be installed if there is no version specified. You can install multiple components at once, or install multiple versions of the same component: tiup install tidb:v3.0.5 tikv pd tiup install tidb:v3.0.5 tidb:v3.0.8 tikv:v3.0.9 Usage: tiup install [:version] [component2...N] [flags] Flags: -h, --help help for install Global Flags: --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string tiup-1.16.3/tests/expected/tiup/tiup-help.output000066400000000000000000000052451505422223000217010ustar00rootroot00000000000000TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system. You can run a specific version of a component via "tiup [:version]". If no version number is specified, the latest version installed locally will be used. If the specified component does not have any version installed locally, the latest stable version will be downloaded from the repository. Usage: tiup [flags] [args...] tiup [flags] [args...] 
Available Commands: install Install a specific version of a component list List the available TiDB components or versions uninstall Uninstall components or versions of a component update Update tiup components to the latest version status List the status of instantiated components clean Clean the data of instantiated components telemetry Controls things about telemetry help Help about any command or component Available Components: mirrors doc playground ctl Flags: -B, --binary [:version] Print binary path of a specific version of a component [:version] and the latest version installed will be selected if no version specified --binpath string Specify the binary path of component instance -h, --help help for tiup --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string -T, --tag string Specify a tag for component instance Component instances with the same "tag" will share a data directory ($TIUP_HOME/data/$tag): $ tiup --tag mycluster playground Examples: $ tiup playground # Quick start $ tiup playground nightly # Start a playground with the latest nightly version $ tiup install [:version] # Install a component of specific version $ tiup update --all # Update all installed components to the latest version $ tiup update --nightly # Update all installed components to the nightly version $ tiup update # Update the "tiup" to the latest version $ tiup list # Fetch the latest supported components list $ tiup status # Display all running/terminated instances $ tiup clean # Clean the data of running/terminated instance (Kill process if it's running) $ tiup clean --all # Clean the data of all running/terminated instances Use "tiup [command] --help" for more information about a command. 
tiup-1.16.3/tests/expected/tiup/tiup-install-ctl.output000066400000000000000000000000001505422223000231570ustar00rootroot00000000000000tiup-1.16.3/tests/expected/tiup/tiup-install-doc.output000066400000000000000000000000011505422223000231430ustar00rootroot00000000000000 tiup-1.16.3/tests/expected/tiup/tiup-install-not-exists.output000066400000000000000000000000501505422223000245170ustar00rootroot00000000000000Error: component `not-exists` not found tiup-1.16.3/tests/expected/tiup/tiup-install-package.output000066400000000000000000000000001505422223000237700ustar00rootroot00000000000000tiup-1.16.3/tests/expected/tiup/tiup-install-test.output000066400000000000000000000000001505422223000233540ustar00rootroot00000000000000tiup-1.16.3/tests/expected/tiup/tiup-install.output000066400000000000000000000011471505422223000224140ustar00rootroot00000000000000Install a specific version of a component. The component can be specified by or :. The latest stable version will be installed if there is no version specified. You can install multiple components at once, or install multiple versions of the same component: tiup install tidb:v3.0.5 tikv pd tiup install tidb:v3.0.5 tidb:v3.0.8 tikv:v3.0.9 Usage: tiup install [:version] [component2...N] [flags] Flags: -h, --help help for install Global Flags: --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string tiup-1.16.3/tests/expected/tiup/tiup-run-test-flag.output000066400000000000000000000002121505422223000234260ustar00rootroot00000000000000Starting component `test`: TIUP_HOME_INTEGRATION_TEST/components/test/v1.1.1/test.bin --flag value --flag2 values integration test v1.1.1 tiup-1.16.3/tests/expected/tiup/tiup-run-test-v1.1.1.output000066400000000000000000000002621505422223000233460ustar00rootroot00000000000000The component `test` is not installed; downloading from repository. 
Starting component `test`: TIUP_HOME_INTEGRATION_TEST/components/test/v1.1.1/test.bin integration test v1.1.1 tiup-1.16.3/tests/expected/tiup/tiup-run-test.output000066400000000000000000000002621505422223000225240ustar00rootroot00000000000000The component `test` is not installed; downloading from repository. Starting component `test`: TIUP_HOME_INTEGRATION_TEST/components/test/v1.1.2/test.bin integration test v1.1.2 tiup-1.16.3/tests/expected/tiup/tiup-run-test0.output000066400000000000000000000002621505422223000226040ustar00rootroot00000000000000The component `test` is not installed; downloading from repository. Starting component `test`: TIUP_HOME_INTEGRATION_TEST/components/test/v1.1.2/test.bin integration test v1.1.2 tiup-1.16.3/tests/expected/tiup/tiup-run-test2.output000066400000000000000000000001561505422223000226100ustar00rootroot00000000000000Starting component `test`: TIUP_HOME_INTEGRATION_TEST/components/test/v1.1.2/test.bin integration test v1.1.2 tiup-1.16.3/tests/expected/tiup/tiup-run-test3.output000066400000000000000000000001561505422223000226110ustar00rootroot00000000000000Starting component `test`: TIUP_HOME_INTEGRATION_TEST/components/test/v1.1.1/test.bin integration test v1.1.1 tiup-1.16.3/tests/expected/tiup/tiup-self-update.output000066400000000000000000000000261505422223000231520ustar00rootroot00000000000000Updated successfully! tiup-1.16.3/tests/expected/tiup/tiup-uninstall-all.output000066400000000000000000000000511505422223000235160ustar00rootroot00000000000000Uninstalled all components successfully! tiup-1.16.3/tests/expected/tiup/tiup-uninstall-test-all.output000066400000000000000000000000531505422223000244750ustar00rootroot00000000000000Uninstalled component `test` successfully! tiup-1.16.3/tests/expected/tiup/tiup-uninstall-test-v1.1.2.output000066400000000000000000000000621505422223000245520ustar00rootroot00000000000000Uninstalled component `test:v1.1.2` successfully! 
tiup-1.16.3/tests/expected/tiup/tiup-uninstall-test.output000066400000000000000000000001041505422223000237240ustar00rootroot00000000000000Use `tiup uninstall test --all` if you want to remove all versions. tiup-1.16.3/tests/expected/tiup/tiup-uninstall.output000066400000000000000000000016121505422223000227540ustar00rootroot00000000000000If you specify a version number, uninstall the specified version of the component. You must use --all explicitly if you want to remove all components or versions which are installed. You can uninstall multiple components or multiple versions of a component at once. The --self flag which is used to uninstall tiup. # Uninstall tiup tiup uninstall --self # Uninstall the specific version a component tiup uninstall tidb:v3.0.10 # Uninstall all version of specific component tiup uninstall tidb --all # Uninstall all installed components tiup uninstall --all Usage: tiup uninstall : [flags] Flags: --all Remove all components or versions. -h, --help help for uninstall --self Uninstall tiup and clean all local data Global Flags: --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string tiup-1.16.3/tests/expected/tiup/tiup-update-not-exists.output000066400000000000000000000000501505422223000243330ustar00rootroot00000000000000Error: component `not-exists` not found tiup-1.16.3/tests/expected/tiup/tiup-update-test-nightly.output000066400000000000000000000000261505422223000246540ustar00rootroot00000000000000Updated successfully! tiup-1.16.3/tests/expected/tiup/tiup-update-test.output000066400000000000000000000000261505422223000232000ustar00rootroot00000000000000Updated successfully! tiup-1.16.3/tests/expected/tiup/tiup.output000066400000000000000000000053351505422223000207530ustar00rootroot00000000000000TiUP is a command-line component management tool that can help to download and install TiDB platform components to the local system. You can run a specific version of a component via "tiup [:version]". 
If no version number is specified, the latest version installed locally will be used. If the specified component does not have any version installed locally, the latest stable version will be downloaded from the repository. Usage: tiup [flags] [args...] tiup [flags] [args...] Available Commands: install Install a specific version of a component list List the available TiDB components or versions uninstall Uninstall components or versions of a component update Update tiup components to the latest version status List the status of instantiated components clean Clean the data of instantiated components telemetry Controls things about telemetry help Help about any command or component Available Components: mirrors doc playground ctl Flags: -B, --binary [:version] Print binary path of a specific version of a component [:version] and the latest version installed will be selected if no version specified --binpath string Specify the binary path of component instance -h, --help help for tiup --skip-version-check Skip the strict version check, by default a version must be a valid SemVer string -T, --tag string Specify a tag for component instance --version version for tiup Component instances with the same "tag" will share a data directory ($TIUP_HOME/data/$tag): $ tiup --tag mycluster playground Examples: $ tiup playground # Quick start $ tiup playground nightly # Start a playground with the latest nightly version $ tiup install [:version] # Install a component of specific version $ tiup update --all # Update all installed components to the latest version $ tiup update --nightly # Update all installed components to the nightly version $ tiup update --self # Update the "tiup" to the latest version $ tiup list # Fetch the latest supported components list $ tiup status # Display all running/terminated instances $ tiup clean # Clean the data of running/terminated instance (Kill process if it's running) $ tiup clean --all # Clean the data of all running/terminated instances Use 
"tiup [command] --help" for more information about a command. tiup-1.16.3/tests/tiup-cluster/000077500000000000000000000000001505422223000163605ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/README.md000066400000000000000000000021411505422223000176350ustar00rootroot00000000000000This folder contains all tests which relies on external service. ## Preprations 1. Set up test environment by running `./docker/up.sh`, see [README.sh](https://github.com/pingcap/tiup/components/cluster/tree/master/docker) for detail. 2. run `docker exec -it tiup-cluster-control bash` in another terminal to proceed. ## Running > Note all this must be run inside the control node set up by docker in the last step. To run all the test: ``` ./tests/run.sh ``` To run a specify test: ``` # ./test/run.sh ./tests/run.sh test_upgrade ``` where TEST_NAME is the file name of `tests/*.sh` The flowing environment can control the testing version of cluster: - `version` The version of cluster to be deploy and test. - `old_version` The version of cluster to be deploy and upgrade to `version` For example: ``` version=v4.0.0-rc old_version=v3.0.12 ./tests/run.sh ``` This will test using version `v4.0.0-rc` , and upgrade from `v3.0.12` to `v4.0.0-rc` when testing upgrade in `tests/test_upgrade.sh` ## Writing new tests New integration tests can be written as shell script in `tests/TEST_NAME.sh`. 
tiup-1.16.3/tests/tiup-cluster/cover/000077500000000000000000000000001505422223000174765ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/cover/.gitkeep000066400000000000000000000000001505422223000211150ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/local/000077500000000000000000000000001505422223000174525ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/local/alertmanager/000077500000000000000000000000001505422223000221145ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/local/alertmanager/alertmanager.yml000066400000000000000000000037731505422223000253130ustar00rootroot00000000000000# this is a test message: magic-string-for-test global: # The smarthost and SMTP sender used for mail notifications. smtp_smarthost: "localhost:25" smtp_from: "alertmanager@example.org" smtp_auth_username: "alertmanager" smtp_auth_password: "password" # smtp_require_tls: true # The Slack webhook URL. # slack_api_url: '' route: # A default receiver receiver: "db-alert-email" # The labels by which incoming alerts are grouped together. For example, # multiple alerts coming in for cluster=A and alertname=LatencyHigh would # be batched into a single group. group_by: ["env", "instance", "alertname", "type", "group", "job"] # When a new group of alerts is created by an incoming alert, wait at # least 'group_wait' to send the initial notification. # This way ensures that you get multiple alerts for the same group that start # firing shortly after another are batched together on the first # notification. group_wait: 30s # When the first notification was sent, wait 'group_interval' to send a batch # of new alerts that started firing for that group. group_interval: 3m # If an alert has successfully been sent, wait 'repeat_interval' to # resend them. 
repeat_interval: 3m routes: # - match: # receiver: webhook-kafka-adapter # continue: true # - match: # env: test-cluster # receiver: db-alert-slack # - match: # env: test-cluster # receiver: db-alert-email receivers: # - name: 'webhook-kafka-adapter' # webhook_configs: # - send_resolved: true # url: 'http://10.0.3.6:28082/v1/alertmanager' #- name: 'db-alert-slack' # slack_configs: # - channel: '#alerts' # username: 'db-alert' # icon_emoji: ':bell:' # title: '{{ .CommonLabels.alertname }}' # text: '{{ .CommonAnnotations.summary }} {{ .CommonAnnotations.description }} expr: {{ .CommonLabels.expr }} http://172.0.0.1:9093/#/alerts' - name: "db-alert-email" email_configs: - send_resolved: true to: "xxx@xxx.com" tiup-1.16.3/tests/tiup-cluster/local/grafana/000077500000000000000000000000001505422223000210515ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/local/grafana/tidb.json000066400000000000000000000676051505422223000227040ustar00rootroot00000000000000{ "__inputs": [ { "name": "test", "label": "test", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "6.1.6" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" } ], "annotations": { "list": [ { "builtIn": 1, "datasource": "test", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 1, "id": null, "links": [], "panels": [ { "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 138, "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB query durations by histogram buckets with different percents", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 1 }, "id": 80, 
"legend": { "alignAsTable": false, "avg": false, "current": false, "hideEmpty": false, "hideZero": false, "max": false, "min": false, "rightSide": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "histogram_quantile(0.999, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "999", "refId": "A" }, { "expr": "histogram_quantile(0.99, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "99", "refId": "B" }, { "expr": "histogram_quantile(0.95, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "95", "refId": "C" }, { "expr": "histogram_quantile(0.80, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "80", "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Duration", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "s", "label": null, "logBase": 2, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB query processing numbers per second", "editable": true, "error": false, "fill": 1, "grid": {}, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 1 }, 
"id": 42, "legend": { "alignAsTable": false, "avg": false, "current": true, "hideEmpty": true, "hideZero": false, "max": true, "min": false, "rightSide": false, "show": true, "sideWidth": 250, "sort": "max", "sortDesc": false, "total": false, "values": true }, "lines": true, "linewidth": 1, "links": [], "maxPerRow": 1, "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum(rate(tidb_server_query_total[1m])) by (result)", "format": "time_series", "instant": false, "intervalFactor": 2, "legendFormat": "query {{result}}", "refId": "A", "step": 60 }, { "expr": "sum(rate(tidb_server_query_total{result=\"OK\"}[1m] offset 1d))", "format": "time_series", "hide": true, "instant": false, "intervalFactor": 2, "legendFormat": "yesterday", "refId": "B", "step": 90 }, { "expr": "sum(tidb_server_connections) * sum(rate(tidb_server_handle_query_duration_seconds_count[1m])) / sum(rate(tidb_server_handle_query_duration_seconds_sum[1m]))", "format": "time_series", "hide": true, "instant": false, "intervalFactor": 2, "legendFormat": "ideal QPS", "refId": "C", "step": 60 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "QPS", "tooltip": { "msResolution": true, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": "0", "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "decimals": null, "description": "TiDB statement statistics by statement type", "editable": true, "error": false, "fill": 1, "grid": {}, 
"gridPos": { "h": 6, "w": 12, "x": 0, "y": 7 }, "id": 21, "legend": { "alignAsTable": true, "avg": true, "current": true, "hideEmpty": true, "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, "sort": null, "sortDesc": null, "total": false, "values": true }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum(rate(tidb_executor_statement_total[1m])) by (type)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{type}}", "refId": "A", "step": 30 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Statement OPS", "tooltip": { "msResolution": false, "shared": true, "sort": 0, "value_type": "cumulative" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 2, "max": null, "min": "0", "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB query total statistics including both successful and failed ones", "editable": true, "error": false, "fill": 0, "grid": {}, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 7 }, "id": 2, "legend": { "alignAsTable": true, "avg": false, "current": true, "hideEmpty": true, "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, "sideWidth": 250, "sort": "max", "sortDesc": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "links": [], "maxPerRow": 1, "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { "alias": 
"total", "lines": false } ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(tidb_server_query_total[1m])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{instance}} {{type}} {{result}}", "refId": "A", "step": 30 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "QPS By Instance", "tooltip": { "msResolution": true, "shared": true, "sort": 2, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": "0", "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "decimals": null, "description": "TiDB failed query statistics by query type", "fill": 0, "gridPos": { "h": 6, "w": 24, "x": 0, "y": 13 }, "id": 137, "legend": { "alignAsTable": true, "avg": false, "current": true, "hideEmpty": true, "hideZero": true, "max": true, "min": false, "rightSide": true, "show": true, "sideWidth": 250, "total": false, "values": true }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "sum(increase(tidb_server_execute_error_total[1m])) by (type, instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": " {{type}}-{{instance}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Failed Query OPM", "tooltip": { "shared": true, "sort": 2, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": 
[ { "format": "short", "label": null, "logBase": 2, "max": null, "min": "0", "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB slow query statistics with slow query durations and coprocessor waiting/executing durations", "fill": 1, "gridPos": { "h": 6, "w": 24, "x": 0, "y": 19 }, "id": 112, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null as zero", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "histogram_quantile(0.90, sum(rate(tidb_server_slow_query_process_duration_seconds_bucket[1m])) by (le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "all_proc", "refId": "A" }, { "expr": "histogram_quantile(0.90, sum(rate(tidb_server_slow_query_cop_duration_seconds_bucket[1m])) by (le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "all_cop_proc", "refId": "B" }, { "expr": "histogram_quantile(0.90, sum(rate(tidb_server_slow_query_wait_duration_seconds_bucket[1m])) by (le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "all_cop_wait", "refId": "C" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Slow query", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "s", "label": null, "logBase": 2, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, 
"alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB durations for different query types with 99.9 percent buckets", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 25 }, "id": 136, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "histogram_quantile(0.999, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le,sql_type))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{sql_type}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "999 Duration", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "s", "label": null, "logBase": 2, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB durations for different query types with 99 percent buckets", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 25 }, "id": 134, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": 
"histogram_quantile(0.99, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le,sql_type))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{sql_type}}", "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "99 Duration", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "s", "label": null, "logBase": 2, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB durations for different query types with 95 percent buckets", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 31 }, "id": 132, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "histogram_quantile(0.95, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le,sql_type))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{sql_type}}", "refId": "C" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "95 Duration", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "s", "label": null, "logBase": 2, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, 
"show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "TiDB durations for different query types with 80 percent buckets", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 31 }, "id": 130, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "histogram_quantile(0.80, sum(rate(tidb_server_handle_query_duration_seconds_bucket[1m])) by (le,sql_type))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{sql_type}}", "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "80 Duration", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "s", "label": null, "logBase": 2, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "repeat": null, "title": "Query Summary", "type": "row" } ], "refresh": "30s", "schemaVersion": 18, "style": "dark", "tags": [], "templating": { "list": [] }, "time": { "from": "now-1h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "browser", "title": "test-TiDB (magic-string-for-test)", "uid": "000000011", "version": 3 } 
tiup-1.16.3/tests/tiup-cluster/local/prometheus/000077500000000000000000000000001505422223000216455ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/local/prometheus/tidb.rules.yml000066400000000000000000000010571505422223000244460ustar00rootroot00000000000000# magic-string-for-test groups: - name: alert.rules rules: - alert: TiDB_schema_error expr: increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 for: 1m labels: env: ENV_LABELS_ENV level: emergency expr: increase(tidb_session_schema_lease_error_total{type="outdated"}[15m]) > 0 annotations: description: "cluster: ENV_LABELS_ENV, instance: {{ $labels.instance }}, values:{{ $value }}" value: "{{ $value }}" summary: TiDB schema error tiup-1.16.3/tests/tiup-cluster/root.json000066400000000000000000000161531505422223000202440ustar00rootroot00000000000000{"signatures":[{"keyid":"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002","sig":"kmRKAh18iBSxTU20AlqCFYV5w4OgK72RGqN5sGkDy5I+xxKlMgq2xvlWH329bpRdtmajd6wozb6ibEhiijWei7DnQCkJPVw1Y5DdKyRAHjM8ZYeawVNaUCh2VH13is6cbzSPp30CKZ5whtuERAgkLsX/M0E6bterzhQR//PTNfO41/NOgXnyqGgOslzM46N86rCBPcQwHcyHV0voXX1zGpCSDLkODEbi6ILweJl0iNnaBF+lBzvW6N+oqmEUUzbH7tLSCsJVyj3tLSlPvbTO8BoWnkN1k67WNkBBfA9vD0s46j2hWGuTMApiUa5Iou6/N0JI9Sb2kZAle15TJQJvEA=="},{"keyid":"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae","sig":"YNGnO9+vcnfwI0ELBdJZh1KEu62NIPZcV2TKfxEBaj7g/Fm1HO3ReAH36dew9MytDPRuIdCXYHVSOQfiBQCuJCF7k/hRQwEFlrJWHMuN2rz7YLqTUyBzkE/PbAEOTtl4C/Q8bxHZ6tHdOJAUIBsXTy4yRZVempUtIULjPNh2d6BPt7x61+3RwhhrPHKFxy2I1hVaZIMfO94Ofb4iwd0UJ1YuQdDqIP75+YtoYUsBFKee4AqvRgKrp5rqQvn0CGOOreOIXvTsszvuzxR26kWvOPPkS3zJRwTu7TW0pE0AsUgEKHKGDoShNdcmx+hGz8mn8BpAivxvJhqqJsz9nazkZA=="},{"keyid":"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a","sig":"X2FykukrMhf2qeK4bgOTy272VMFJeHAQYNGayckbjfdfToTfnRt+mwV1x5jUFTq39XsEPC6wgTs3ZWQ/3DJ1Wv7oyytN6QTPvblcFnAHNdCNGU2QceyG1N9efRbnpL+VzaWVmpwqrV2DjSHNRYMJsCfwbuC9ZCZ4HFL+bv/InZnM7Zxg7Yrl3Vzs6gOYnlZr19vgQ
OW/n7CCYgE2X9iS+Y3/8ALFtgU+CJEjAXK37N769H+kYG+IuuhSobdhBk/ie+oXTxm/Y5BjNBvBqetUVpCdqw/PpJ+vpN7KBGGVg3ij10wA2a1B+CA1dxjF6Tzg/HAnNRidZrbmWrkc8I69IA=="},{"keyid":"d499c8ca3c2018885037da89bdfa327e21a0e6a15e2ca8dabe5e78a3edf9c91c","sig":"wo6iQdse3ADk4SirmdjCdjOnm9sg6ztKEAFfi2BRZE1Z8FlnzUp8S+OztmgRqsVVfGEDAwgWJeimRjohznKBz0tS3bZjzLdHllw0ZHv1n+i8vaBVA5uvpZuzjpCdQbNEh4o9yrVv7hqyFVva5LEMJelOkWV6TvdgdPztHC7B7neyNm7I/0QI3mG8HHFen/CX2CHJu6OQ29XHjbe79VnTbo2ujK9b3ZIHivRMpND0/DBKxSWuXvQ2BSC1nYNDBoJ2AneOQ+aUM6WMUQst8ct9pGlZszyZX/8A1Pbj/e1ToPgkEDSqg2w+Bxty0allc4nyaijzBDo6SoKoWfXKi9wq0Q=="},{"keyid":"70033289cef8e5105914b22ecc7f15e271d0650d5216d1ab0bd67f8440411bb6","sig":"OoK5e9uZ6xu4b6WQjbJge2hsJR10DJ70vEsFRY26C52m5WOrhOJeDxthy5wF01P3odB5hWAAKYDRzB+wUy/h05O33ZXxYrbtqDkwkBYOPRr0X24MDz2nD+etOXyed69V77xi62JYSbl9aP5ItKDGrrYDkOzkQcP2q87eaVJxRJEm0qrA4Jtz8oZ/nOZwX9vZRmd/qNorye/On8yxbtueqCkTVKULI0c/CwBw/rAr+THa1alPqn24djS3MqBst+DDRWR3996M9Cz5agWBASo6TSXNoUnIOGyza7zqoNsdrePLHCbInUWuJ+lN2ilGTgl6ERyYoc0+QFmRaVljG5pGcw=="}],"signed":{"_type":"root","expires":"2024-07-26T11:18:30+08:00","roles":{"index":{"keys":{"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn5kVA7MlBfSe7EBaExjl\nKbwoDkn1aYi74s29mFgtRo8nejbrVvZQMCIUhvKc0pFa/l9JD/QY6/nAOCE1lpzi\nwwNkSntfOo3p3HQIR+Ut7hZ4Sxfe/5JagGo3LQ+Hd3EJWUxyEfQ/Bff07F3XAbqM\n5+cKNrdsKWZJcPiJDW621qGwCx52f+gzl9bnFe4/hx34OUgirwqh5DS+LhIO+/yt\nbOiN1AyjQKlnb8lUnblElS4Njd+F4io5VzSrZYi2+4AbTkO6wLwbsWHMzXfv9qwn\nvllufOHpB6EwiQ/xBOMuvJJymHnZvs8AH4SuydQIXLaJuv1ysFaBs0KB/ktbakSK\nLwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/index.json"},"root":{"keys":{"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyDwCfCl30vhyJW7fB1bs\npRYKtBKzl7o0qnJTm+IksjQ8RXxj8osUpMLmSvOzCaJ5Wxe+Pm1LpSTDbbubbgvd\nnmEFL6228sifviNIu2HlIl+agfzmXuJ9OBlzGUaI4gAd1Z6pF6+mjlcjz2PbWF84\nAbXZdK49uluqulp7HrGB/qNjGcIRUCHgDU4nnq0OkI1BZZSKm9ovonqDkIK76x/S\niAD9OjKsjQ/s57tE+5WTVObKpfrfK0JeHdpAUsA/2n4L1Z6FmZD4LZWqb0i+C7xj\nMElC99KtjlwRntcjeVWG9YjU8AcEN0n1gON9S2oRdyyAzDTgGb7WueDnn6qstt5w\nSQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"70033289cef8e5105914b22ecc7f15e271d0650d5216d1ab0bd67f8440411bb6":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApU5RHP0K+Fgkzm9L0yHR\n4CEqLLwHr7hQxjWOaq5K0UfaOKcsPQZ1SkJ/AMppz7ovzwOU4hcy0wJOV7ms6ACk\nS3hte2GlH/xp+OzWiRnI4qJ6GRrAe+ototj1ZMGvpLK4ifxkKaY6vuWFFAeS0fSe\nPHUGAl5v+PaJWgDNQTRmuAu5oCaYP6oT6VKHj6ulLAgAOqWsBSJiK3oIRcWPR+uI\nIW/9BV158wfmxAw1+7ch1RD44+1vV3+Eo94alvVZIAfcJqDS3XGr2Hfd/YWGj1d2\nD26eblBJoQt0L2E2EL8igu1sudVkMZ3NAIfmBrOWUxHEbIjYeKvXPbaSGdC+FoXD\nrwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOgQkwLOh31QV9OpbO9v\n6o83durJFGPOnVXZiab83pKaSk7HEK9WzXBq0BaPvtFwSfROVdpgtopri5lZi+uH\naMKLUn5F8XRnSMl/7m5vM4XpZZYa4aQId4TWdbFtTu31eHGZ3eEC5nDRJ5NhZOJd\nKLFBu/xmxrh/eNZt4QbdWLZayjHnzyoy5AnfNTR6nJgPAv+rBOqyqT/r14q4Pngh\n3z0I3pNFr5qmxsp013XV+kgOW1F7zT7IMU8xRIgo85UWUNhax0/bjY/2NI1Z+WjR\nyhZmUBMVYWvfw97xDUrvBvrJxZPgg0lGvxJC6LF2dM7wgLaNx9khT6HMBVxjxLMs\nDQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"d499c8ca3c2018885037da89bdfa327e21a0e6a15e2ca8dabe5e78a3edf9c91c":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5oDytiywLDOSpIBovZxx\nNlZJg5Gk3O9kpiOQ0XnD+L2LV+a2dJU1KmBOoGCUr2TNaGTPihAStjpFIsW4c7Ye\nB2RjUFUrXRf3mvc3n4fACayenxtnCleSR4gKkAdHqqPCiWHT5TAtybKSHuHAluUL\nkMvavUZjIPMj0YYB0R8Re7BjU+zxnipJosTbbPQ7fa3+x2VAHc066Y9qp1YucdpB\nMZ3UwtSVNK7aCbFZvKPwAm22fnDYmMbYFeTz/rrl8k+rKTM37d4D3mURC9xDJxIP\nXVaU2dBImYjoFcY0/5oBU5vr1sj2sdUH+3G5AUr6iCL+XJLiwA1x24jKA6mUjQ93\ndwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":3,"url":"/root.json"},"snapshot":{"keys":{"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqTZx29eJR5EumjqM4YTb\nFlKbim1GNYmtbCLH51BbU2lt46ddmfGvtGsxTD3mIZ/GEHVFv6Aei3xx5nIfhGP0\nrG78JRz394uU8Pd62DiIFWYizr5o+ZBZu29D2YK5ZtxoLFpgt0ibnINK2NcesDC8\nSqfIUbMiQFT6yB/MYD275SjfRGHOeYTPmKdjMJrhLL2cfIPYnQ0QFYIyMvXBG1Fj\nU0rc9UclYQHh9YheIDVYI9YCo/DWP3KFfRJpoTjQRGoPSK9TXcpCAEzQpEG3jOek\n9PdV9Ol6/O8JbrFwXWF3LhkUThg+zCjV4qHtP4oqp5QCqzTQTXGQ9qxWUSlHi4Eu\nIwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/snapshot.json"},"timestamp":{"keys":{"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzTgV5iKhMnunUDxt4PB\npYqTMPaJN/ZdOOsP6cS3DeCE/EcYGfgCjvP7KD3gjG98VDBTVcuwZClSy+/zvHhV\nIq7VWu+yxQL5c6oa1xpCyHoA96JiLIDPhmqEdscdRybcRQ2CYywzKA8jSwEQCnEK\nc8a74ceY352l/MEcOem0+AtKrOjqcjbXCayDwC9yTg/c78bkp+4T8AhSWgt6Tlrt\nY8jLE7zwojFtIYtMwobWRIW2O3nJDXiSBbTPG3M9kF1G43INshSdBcuq5Tmy8lpE\n/XiG/E7+hP63Hm+KAcdvl553Zs7pLhAZxV0kqlApqRRwhscw+JQci8sVONun5t9t\nNwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/timestamp.json"}},"spec_version":"0.1.0","version":4}}tiup-1.16.3/tests/tiup-cluster/run.sh000077500000000000000000000030761505422223000175310ustar00rootroot00000000000000#!/bin/bash set -eu # Change directory to the source directory of this script. Taken from: # https://stackoverflow.com/a/246128/3858681 pushd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" PATH=$PATH:/tiup-cluster/bin export TIUP_CLUSTER_PROGRESS_REFRESH_RATE=10s export TIUP_CLUSTER_EXECUTE_DEFAULT_TIMEOUT=300s export DEBUG_CHECKPOINT=1 export version=${version-v4.0.12} export old_version=${old_version-v3.0.20} # Prepare local config echo "preparing local config" ls -lh ./local rm -rf /tmp/local cp -r ./local /tmp/local ls -lh /tmp/local function tiup-cluster() { mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/ # echo "in function" if [ -f "./bin/tiup-cluster.test" ]; then ./bin/tiup-cluster.test -test.coverprofile=./cover/cov.itest-$(date +'%s')-$RANDOM.out __DEVEL--i-heard-you-like-tests "$@" else ../../bin/tiup-cluster "$@" fi } function tiup() { mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/ if [ -f "../tiup/bin/tiup.test" ]; then ../tiup/bin/tiup.test -test.coverprofile=./cover/cov.itest-$(date +'%s')-$RANDOM.out __DEVEL--i-heard-you-like-tests "$@" else ../../bin/tiup "$@" fi } . ./script/util.sh # use run.sh test_cmd test_upgrade to run specify cases do_cases=$* if [ "$do_cases" == "" ]; then for script in ./test_*.sh; do echo "run test: $script" . 
$script done else for script in "${do_cases[@]}"; do echo "run test: $script.sh" . ./$script.sh done fi echo -e "\033[0;36m<<< Run all test success >>>\033[0m" tiup-1.16.3/tests/tiup-cluster/script/000077500000000000000000000000001505422223000176645ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/script/cmd_subtest.sh000077500000000000000000000177441505422223000225540ustar00rootroot00000000000000#!/bin/bash set -eu function cmd_subtest() { mkdir -p ~/.tiup/bin/ version="nightly" topo_name="full" test_tls=false native_ssh=false while [[ $# -gt 0 ]] do case $1 in --version) version="$2" shift shift ;; --topo) topo_name="$2" shift shift ;; --tls) test_tls=true shift ;; --native-ssh) native_ssh=true shift ;; esac done name="test_cmd_$RANDOM" if [ $test_tls = true ]; then topo=./topo/${topo_name}_tls.yaml else topo=./topo/${topo_name}.yaml fi client="" if [ $native_ssh == true ]; then client="--ssh=system" fi # identify SSH via ssh-agent eval $(ssh-agent) ssh-add /root/.ssh/id_rsa mv /root/.ssh/id_rsa{,.bak} tiup-cluster $client check $topo -i ~/.ssh/id_rsa --enable-mem --enable-cpu --apply mv /root/.ssh/id_rsa{.bak,} check_result=`tiup-cluster $client --yes check $topo -i ~/.ssh/id_rsa` # check the check result echo $check_result | grep "cpu-cores" echo $check_result | grep "memory" echo $check_result | grep "os-version" echo $check_result | grep "selinux" echo $check_result | grep "service" echo $check_result | grep "thp" for i in {1..5}; do ssh -o "StrictHostKeyChecking=no" -o "PasswordAuthentication=no" n"$i" "grep -q tidb /etc/passwd && (killall -u tidb; userdel -f -r tidb) || true"; done # This should fail because there is no such user: tidb ! 
tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa --skip-create-user # This is a normal deploy tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa # Cleanup cluster meta and test --skip-create-user again, this should success rm -rf ~/.tiup/storage/cluster/clusters/$name tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa --skip-create-user # check the local config tiup-cluster $client exec $name -N n1 --command "grep tidb.rules.yml /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" ! tiup-cluster $client exec $name -N n1 --command "grep node.rules.yml /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/prometheus-9090/conf/tidb.rules.yml" tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json" tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/alertmanager-9093/conf/alertmanager.yml" tiup-cluster $client list | grep "$name" tiup-cluster $client audit | grep "deploy $name $version" # Get the audit id can check it just runnable id=`tiup-cluster audit | grep "deploy $name $version" | awk '{print $1}'` tiup-cluster $client audit $id tiup-cluster $client --yes start $name # Patch a stopped cluster tiup-cluster $client --yes patch $name ~/.tiup/storage/cluster/packages/tidb-v$version-linux-amd64.tar.gz -R tidb --offline tiup-cluster $client display $name | grep "tidb (patched)" tiup-cluster $client _test $name writable # check the data dir of tikv # it's ok to omit client type after deploy tiup-cluster exec $name -N n1 --command "grep /home/tidb/deploy/tikv-20160/data /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh" tiup-cluster exec $name -N n1 --command "grep advertise-status-addr /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh" tiup-cluster exec $name -N n3 --command "grep 
/home/tidb/my_kv_data /home/tidb/deploy/tikv-20160/scripts/run_tikv.sh" # test checkpoint tiup-cluster exec $name -N n1 --command "touch /tmp/checkpoint" tiup-cluster exec $name -N n1 --command "ls /tmp/checkpoint" tiup-cluster exec $name -N n1 --command "rm -f /tmp/checkpoint" id=`tiup-cluster audit | grep "exec $name" | grep "ls /tmp/checkpoint" | awk '{print $1}'` tiup-cluster replay --yes $id ! tiup-cluster exec $name -N n1 --command "ls /tmp/checkpoint" # test patch overwrite tiup-cluster $client --yes patch $name ~/.tiup/storage/cluster/packages/tidb-v$version-linux-amd64.tar.gz -R tidb --overwrite # overwrite with the same tarball twice tiup-cluster $client --yes patch $name ~/.tiup/storage/cluster/packages/tidb-v$version-linux-amd64.tar.gz -R tidb --overwrite # test patch with a non-executable entry rm -rf tidb-server touch tidb-server # this is a non-executable regular file tar -czf tidb-non-executable.tar.gz tidb-server ! tiup-cluster $client --yes patch $name ./tidb-non-executable.tar.gz -R tidb # test patch with a dir entry rm -rf tidb-server mkdir tidb-server tar -czf tidb-dir-entry.tar.gz tidb-server ! tiup-cluster $client --yes patch $name ./tidb-dir-entry.tar.gz -R tidb tiup-cluster $client --yes stop $name # test start prometheus,grafana won't hang-forever(can't update topology) # let the CI to stop the job if hang forever ! 
tiup-cluster $client --yes start $name -R prometheus,grafana tiup-cluster $client --yes restart $name tiup-cluster $client _test $name writable tiup-cluster $client _test $name data display_result=`tiup-cluster $client display $name` echo "$display_result" | grep "Cluster type" echo "$display_result" | grep "Cluster name" echo "$display_result" | grep "Cluster version" echo "$display_result" | grep "Dashboard URL" echo "$display_result" | grep "Total nodes" echo "$display_result" | grep -v "Since" # display with --uptime should show process uptime display_result=`tiup-cluster $client display $name --uptime` echo "$display_result" | grep "Since" # Test rename tiup-cluster $client --yes rename $name "tmp-cluster-name" tiup-cluster $client display "tmp-cluster-name" tiup-cluster $client --yes rename "tmp-cluster-name" $name # Test enable & disable tiup-cluster $client exec $name -R tidb --command="systemctl status tidb-4000|grep 'enabled;'" tiup-cluster $client exec $name -R pd --command="systemctl status pd-2379|grep 'enabled;'" tiup-cluster $client disable $name -R tidb tiup-cluster $client exec $name -R tidb --command="systemctl status tidb-4000|grep 'disabled;'" tiup-cluster $client exec $name -R pd --command="systemctl status pd-2379|grep 'enabled;'" tiup-cluster $client disable $name tiup-cluster $client exec $name -R pd --command="systemctl status pd-2379|grep 'disabled;'" tiup-cluster $client enable $name tiup-cluster $client exec $name -R tidb --command="systemctl status tidb-4000|grep 'enabled;'" tiup-cluster $client exec $name -R pd --command="systemctl status pd-2379|grep 'enabled;'" tiup-cluster $client --yes clean $name --data --all --ignore-node n1:9090 # Test push and pull echo "test_transfer $name $RANDOM `date`" > test_transfer_1.txt tiup-cluster $client push $name test_transfer_1.txt "{{ .DeployDir }}/test_transfer.txt" -R grafana tiup-cluster $client pull $name "{{ .DeployDir }}/test_transfer.txt" test_transfer_2.txt -R grafana diff 
test_transfer_1.txt test_transfer_2.txt rm -f test_transfer_{1,2}.txt echo "checking cleanup data and log" tiup-cluster $client exec $name -N n1 --command "ls /home/tidb/deploy/prometheus-9090/log/prometheus.log" ! tiup-cluster $client exec $name -N n1 --command "ls /home/tidb/deploy/tikv-20160/log/tikv.log" tiup-cluster $client --yes start $name ! tiup-cluster $client _test $name data cp ~/.tiup/storage/cluster/clusters/$name/ssh/id_rsa "/tmp/$name.id_rsa" tiup-cluster $client --yes destroy $name # after destroy the cluster, the public key should be deleted ! ssh -o "StrictHostKeyChecking=no" -o "PasswordAuthentication=no" -i "/tmp/$name.id_rsa" tidb@n1 "ls" unlink "/tmp/$name.id_rsa" } tiup-1.16.3/tests/tiup-cluster/script/detect_error.sh000077500000000000000000000005071505422223000227060ustar00rootroot00000000000000#!/bin/bash set -eu err_num=$(find $1 -name "*.log" -exec grep "\[ERROR\]" {} \; | wc -l) if [ ${err_num} != "0" ]; then echo "detect ${err_num} [ERROR] log" fi err_num=$(find $1 -name "*stderr.log" -exec cat {} \; | wc -l) if [ ${err_num} != "0" ]; then echo "detect ${err_num} stderr log" fi echo "no error log found" tiup-1.16.3/tests/tiup-cluster/script/pull_log.sh000077500000000000000000000010471505422223000220420ustar00rootroot00000000000000#!/bin/bash out_dir=$1 ipprefix=${TIUP_TEST_IP_PREFIX:-"172.19.0"} mkdir -p $out_dir for i in {100..105} do h="${ipprefix}.${i}" echo $h mkdir -p $out_dir/$h if [ "$i" == "100" ]; then find ~/.tiup/logs -type f -name "*.log" -exec cp "{}" $out_dir/$h \; else logs=$(ssh -o "StrictHostKeyChecking no" root@$h "find /home/tidb | grep '.*log/.*\.log'") for log in $logs do scp -o "StrictHostKeyChecking no" -pr root@$h:$log "$out_dir/$h/" done fi done chmod -R 777 $out_dir tiup-1.16.3/tests/tiup-cluster/script/scale_core.sh000077500000000000000000000123151505422223000223240ustar00rootroot00000000000000#!/bin/bash set -eu function scale_core() { mkdir -p ~/.tiup/bin/ version=$1 test_tls=$2 native_ssh=$3 client="" if [ 
$native_ssh == true ]; then client="--ssh=system" fi name="test_scale_core_$RANDOM" if [ $test_tls = true ]; then topo=./topo/full_tls.yaml else topo=./topo/full.yaml fi tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa tiup-cluster $client list | grep "$name" tiup-cluster $client --yes start $name tiup-cluster $client _test $name writable tiup-cluster $client display $name tiup-cluster $client --yes reload $name --skip-restart if [ $test_tls = true ]; then total_sub_one=19 total=20 total_add_one=21 else total_sub_one=24 total=25 total_add_one=26 fi echo "start scale in tidb" tiup-cluster $client --yes scale-in $name -N n1:4000 wait_instance_num_reach $name $total_sub_one $native_ssh # ensure Prometheus's configuration is updated automatically ! tiup-cluster $client exec $name -N n1 --command "grep -q n1:10080 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" echo "start scale out tidb" topo=./topo/full_scale_in_tidb.yaml tiup-cluster $client --yes scale-out $name $topo # after scale-out, ensure the service is enabled tiup-cluster $client exec $name -N n1 --command "systemctl status tidb-4000 | grep Loaded |grep 'enabled; vendor'" tiup-cluster $client exec $name -N n1 --command "grep -q n1:10080 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" # scale in tikv maybe exists in several minutes or hours, and the GitHub CI is not guaranteed # echo "start scale in tikv" # tiup-cluster --yes scale-in $name -N n3:20160 # wait_instance_num_reach $name $total_sub_one $native_ssh # echo "start scale out tikv" # topo=./topo/full_scale_in_tikv.yaml # tiup-cluster --yes scale-out $name $topo echo "start scale in pump" tiup-cluster $client --yes scale-in $name -N n3:8250 wait_instance_num_reach $name $total_sub_one $native_ssh # ensure Prometheus's configuration is updated automatically ! 
tiup-cluster $client exec $name -N n1 --command "grep -q n3:8250 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" echo "start scale out pump" topo=./topo/full_scale_in_pump.yaml tiup-cluster $client --yes scale-out $name $topo # after scale-out, ensure this instance come back tiup-cluster $client exec $name -N n1 --command "grep -q n3:8250 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" echo "start scale in pd" tiup-cluster $client --yes scale-in $name -N n3:2379 wait_instance_num_reach $name $total_sub_one $native_ssh # validate https://github.com/pingcap/tiup/issues/786 # ensure that this instance is removed from the startup scripts of other components that need to rely on PD ! tiup-cluster $client exec $name -N n1 --command "grep -q n3:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh" # ensure Prometheus's configuration is updated automatically ! tiup-cluster $client exec $name -N n1 --command "grep -q n3:2379 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" echo "start scale out pd" topo=./topo/full_scale_in_pd.yaml tiup-cluster $client --yes scale-out $name $topo # after scale-out, ensure this instance come back tiup-cluster $client exec $name -N n1 --command "grep -q n3:2379 /home/tidb/deploy/tidb-4000/scripts/run_tidb.sh" tiup-cluster $client exec $name -N n1 --command "grep -q n3:2379 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" echo "start scale out tiproxy" topo=./topo/full_scale_in_tiproxy.yaml tiup-cluster $client --yes scale-out $name $topo wait_instance_num_reach $name $total_add_one $native_ssh echo "start scale in tiproxy" tiup-cluster $client --yes scale-in $name -N n1:6000 wait_instance_num_reach $name $total $native_ssh echo "start scale in tidb" tiup-cluster $client --yes scale-in $name -N n2:4000 wait_instance_num_reach $name $total_sub_one $native_ssh ! tiup-cluster $client exec $name -N n2 --command "ls /home/tidb/deploy/monitor-9100/deploy/monitor-9100" ! 
tiup-cluster $client exec $name -N n2 --command "ps aux | grep node_exporter | grep -qv grep" ! tiup-cluster $client exec $name -N n2 --command "ps aux | grep blackbox_exporter | grep -qv grep" # after all components on the node were scale-ined, the SSH public is automatically deleted ! ssh -o "StrictHostKeyChecking=no "-o "PasswordAuthentication=no" -i ~/.tiup/storage/cluster/$name/ssh/id_rsa tidb@n2 "ls" echo "start scale out tidb" topo=./topo/full_scale_in_tidb_2nd.yaml tiup-cluster $client --yes scale-out $name $topo # after scalue-out, ensure node_exporter and blackbox_exporter come back tiup-cluster $client exec $name -N n2 --command "ls /home/tidb/deploy/monitor-9100/deploy/monitor-9100" tiup-cluster $client exec $name -N n2 --command "ps aux | grep node_exporter | grep -qv grep" tiup-cluster $client exec $name -N n2 --command "ps aux | grep blackbox_exporter | grep -qv grep" tiup-cluster $client _test $name writable tiup-cluster $client --yes destroy $name } tiup-1.16.3/tests/tiup-cluster/script/scale_tiproxy.sh000066400000000000000000000056701505422223000231150ustar00rootroot00000000000000#!/bin/bash set -eu function scale_tiproxy() { mkdir -p ~/.tiup/bin/ version=$1 native_ssh=$2 common_args="--wait-timeout=360" if [ $native_ssh == true ]; then common_args="$common_args --ssh=system" fi name="test_scale_tiproxy_$RANDOM" topo=./topo/tiproxy.yaml check_cert_file="ls /home/tidb/deploy/tidb-4000/tls/tiproxy-session.crt /home/tidb/deploy/tidb-4000/tls/tiproxy-session.key" check_cert_config="grep -q session-token-signing-key /home/tidb/deploy/tidb-4000/conf/tidb.toml" tiup-cluster $common_args --yes deploy $name $version $topo -i ~/.ssh/id_rsa # the session certs exist tiup-cluster $common_args exec $name -N n1 --command "$check_cert_file" # the configurations are updated tiup-cluster $common_args exec $name -N n1 --command "$check_cert_config" tiup-cluster $common_args list | grep "$name" tiup-cluster $common_args --yes start $name tiup-cluster $common_args 
_test $name writable tiup-cluster $common_args display $name tiup-cluster $common_args --yes reload $name --skip-restart total_sub_one=7 total=8 # disable tiproxy echo "start scale in tiproxy" tiup-cluster $common_args --yes scale-in $name -N n1:6000 wait_instance_num_reach $name $total $native_ssh # scale in tidb and scale out again echo "start scale in tidb" tiup-cluster $common_args --yes scale-in $name -N n2:4000 wait_instance_num_reach $name $total_sub_one $native_ssh echo "start scale out tidb" topo=./topo/full_scale_in_tidb_2nd.yaml tiup-cluster $common_args --yes scale-out $name $topo # the session certs don't exist on the new tidb ! tiup-cluster $common_args exec $name -N n2 --command "$check_cert_file" # the configurations are not updated on the new tidb ! tiup-cluster $common_args exec $name -N n2 --command "$check_cert_config" # enable tiproxy again echo "start scale out tiproxy" topo=./topo/full_scale_in_tiproxy.yaml tiup-cluster $common_args --yes scale-out $name $topo # the session certs exist on the new tidb tiup-cluster $common_args exec $name -N n2 --command "$check_cert_file" # the configurations are updated on the new tidb tiup-cluster $common_args exec $name -N n2 --command "$check_cert_config" # scale in tidb and scale out again echo "start scale in tidb" tiup-cluster $common_args --yes scale-in $name -N n2:4000 wait_instance_num_reach $name $total $native_ssh echo "start scale out tidb" topo=./topo/full_scale_in_tidb_2nd.yaml tiup-cluster $common_args --yes scale-out $name $topo # the session certs exist on the new tidb tiup-cluster $common_args exec $name -N n2 --command "$check_cert_file" # the configurations are updated on the new tidb tiup-cluster $common_args exec $name -N n2 --command "$check_cert_config" tiup-cluster $common_args _test $name writable tiup-cluster $common_args --yes destroy $name } 
tiup-1.16.3/tests/tiup-cluster/script/scale_tools.sh000077500000000000000000000121661505422223000225400ustar00rootroot00000000000000#!/bin/bash set -eu function scale_tools() { mkdir -p ~/.tiup/bin/ version=$1 test_tls=$2 native_ssh=$3 client="" if [ $native_ssh == true ]; then client="--ssh=system" fi name="test_scale_tools_$RANDOM" if [ $test_tls = true ]; then topo=./topo/full_tls.yaml else topo=./topo/full_without_tiflash.yaml fi tiup-cluster $client --yes deploy $name $version $topo -i ~/.ssh/id_rsa # check the local config tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/prometheus-9090/conf/tidb.rules.yml" tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json" tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/alertmanager-9093/conf/alertmanager.yml" tiup-cluster $client exec $name -N n1 --command "grep alertmanagers /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" for item in pump drainer tidb tikv pd grafana node_exporter blackbox_exporter; do tiup-cluster $client exec $name -N n1 --command "grep $item /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" done tiup-cluster $client list | grep "$name" tiup-cluster $client --yes start $name tiup-cluster $client _test $name writable tiup-cluster $client display $name if [ $test_tls = true ]; then total_sub_one=19 total=20 total_add_one=21 else total_sub_one=21 total=22 total_add_one=23 fi echo "start scale in pump" tiup-cluster $client --yes scale-in $name -N n3:8250 wait_instance_num_reach $name $total_sub_one $native_ssh echo "start scale out pump" topo=./topo/full_scale_in_pump.yaml tiup-cluster $client --yes scale-out $name $topo echo "start scale in cdc" yes | tiup-cluster $client scale-in $name -N n3:8300 wait_instance_num_reach $name $total_sub_one $native_ssh echo "start scale out cdc" topo=./topo/full_scale_in_cdc.yaml yes | 
tiup-cluster $client scale-out $name $topo if [ $test_tls = false ]; then echo "start scale in tispark" yes | tiup-cluster $client --yes scale-in $name -N n4:7078 wait_instance_num_reach $name $total_sub_one $native_ssh echo "start scale out tispark" topo=./topo/full_scale_in_tispark.yaml yes | tiup-cluster $client --yes scale-out $name $topo fi echo "start scale in grafana" tiup-cluster $client --yes scale-in $name -N n1:3000 wait_instance_num_reach $name $total_sub_one $native_ssh echo "start scale out grafana" topo=./topo/full_scale_in_grafana.yaml tiup-cluster $client --yes scale-out $name $topo echo "start scale out prometheus" topo=./topo/full_scale_in_prometheus.yaml tiup-cluster $client --yes scale-out $name $topo wait_instance_num_reach $name $total_add_one $native_ssh echo "start scale in prometheus" tiup-cluster $client --yes scale-in $name -N n2:9090 wait_instance_num_reach $name $total $native_ssh # make sure grafana dashboards has been set to default (since the full_sale_in_grafana.yaml didn't provide a local dashboards dir) ! tiup-cluster $client exec $name -N n1 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/tidb.json" # currently tiflash is not supported in TLS enabled cluster # and only Tiflash support data-dir in multipath if [ $test_tls = false ]; then echo "start scale out tiflash(first time)" topo=./topo/full_scale_in_tiflash.yaml tiup-cluster $client --yes scale-out $name $topo tiup-cluster $client exec $name -N n1 --command "grep tiflash /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" # ensure scale-out will mark pd.enable-placement-rules to true. 
ref https://github.com/pingcap/tiup/issues/1226 curl n3:2379/pd/api/v1/config 2>/dev/null | grep '"enable-placement-rules": "true"' # ensure tiflash's data dir exists tiup-cluster $client exec $name -N n3 --command "ls /home/tidb/deploy/tiflash-9000/data1" tiup-cluster $client exec $name -N n3 --command "ls /data/tiflash-data" echo "start scale in tiflash" tiup-cluster $client --yes scale-in $name -N n3:9000 sleep 20 tiup-cluster $client display $name | grep Tombstone echo "start prune tiflash" yes | tiup-cluster $client prune $name wait_instance_num_reach $name $total $native_ssh ! tiup-cluster $client exec $name -N n3 --command "ls /home/tidb/deploy/tiflash-9000/data1" ! tiup-cluster $client exec $name -N n3 --command "ls /data/tiflash-data" echo "start scale out tiflash(second time)" topo=./topo/full_scale_in_tiflash.yaml tiup-cluster $client --yes scale-out $name $topo fi tiup-cluster $client _test $name writable tiup-cluster $client --yes destroy $name # test cluster log dir tiup-cluster notfound-command 2>&1 | grep $HOME/.tiup/logs/tiup-cluster-debug TIUP_LOG_PATH=/tmp/a/b tiup-cluster notfound-command 2>&1 | grep /tmp/a/b/tiup-cluster-debug } tiup-1.16.3/tests/tiup-cluster/script/tikv_cdc.sh000077500000000000000000000122031505422223000220070ustar00rootroot00000000000000#!/bin/bash set -eu function tikv_cdc_test() { mkdir -p ~/.tiup/bin/ version="nightly" topo_name="tikv_cdc" test_tls=false tikv_cdc_patch="" while [[ $# -gt 0 ]]; do case $1 in --version) version="$2" shift shift ;; --topo) topo_name="$2" shift shift ;; --tls) test_tls=true shift ;; --tikv-cdc-patch) tikv_cdc_patch="$2" shift shift ;; esac done name="test_tikv_cdc_$RANDOM" if [ $test_tls = true ]; then topo=./topo/${topo_name}_tls.yaml else topo=./topo/$topo_name.yaml fi # identify SSH via ssh-agent eval $(ssh-agent) ssh-add /root/.ssh/id_rsa tiup-cluster check $topo --apply # Test check version. Cluster version >= v6.2.0 is required. 
# Error message: "Error: init config failed: n3:8600: tikv-cdc only supports cluster version v6.2.0 or later" ! tiup-cluster --yes deploy $name 6.1.0 $topo tiup-cluster --yes deploy $name $version $topo # check the local config tiup-cluster exec $name -R tikv-cdc --command 'grep "gc-ttl = 43200$" /home/tidb/deploy/tikv-cdc-8600/conf/tikv-cdc.toml' tiup-cluster list | grep "$name" tiup-cluster audit | grep "deploy $name $version" # Get the audit id can check it just runnable id=$(tiup-cluster audit | grep "deploy $name $version" | awk '{print $1}') tiup-cluster audit $id tiup-cluster --yes start $name # Patch if [[ ! -z "$tikv_cdc_patch" ]]; then wget https://tiup-mirrors.pingcap.com/tikv-cdc-v${tikv_cdc_patch}-linux-amd64.tar.gz tiup install tikv-cdc:v${tikv_cdc_patch} tiup-cluster --yes patch $name ./tikv-cdc-v${tikv_cdc_patch}-linux-amd64.tar.gz -R tikv-cdc --offline tiup-cluster display $name | grep "tikv-cdc (patched)" fi tiup-cluster _test $name writable # check the data dir tiup-cluster exec $name -N n3 --command "grep /home/tidb/deploy/tikv-cdc-8600/data /home/tidb/deploy/tikv-cdc-8600/scripts/run_tikv-cdc.sh" tiup-cluster exec $name -N n4 --command "grep /home/tidb/tikv_cdc_data /home/tidb/deploy/tikv-cdc-8600/scripts/run_tikv-cdc.sh" # test patch overwrite if [[ ! 
-z "$tikv_cdc_patch" ]]; then tiup-cluster --yes patch $name ./tikv-cdc-v${tikv_cdc_patch}-linux-amd64.tar.gz -R tikv-cdc --overwrite # overwrite with the same tarball twice tiup-cluster --yes patch $name ./tikv-cdc-v${tikv_cdc_patch}-linux-amd64.tar.gz -R tikv-cdc --overwrite fi tiup-cluster --yes stop $name tiup-cluster --yes start $name -R pd,tikv-cdc tiup-cluster --yes restart $name tiup-cluster _test $name writable tiup-cluster _test $name data # Test enable & disable tiup-cluster exec $name -R tikv-cdc --command="systemctl status tikv-cdc-8600|grep 'enabled;'" tiup-cluster disable $name -R tikv-cdc tiup-cluster exec $name -R tikv-cdc --command="systemctl status tikv-cdc-8600|grep 'disabled;'" tiup-cluster disable $name tiup-cluster enable $name tiup-cluster exec $name -R tikv-cdc --command="systemctl status tikv-cdc-8600|grep 'enabled;'" tiup-cluster --yes clean $name --data --all --ignore-node n5:8600 echo "checking cleanup data and log" ! tiup-cluster exec $name -N n3 --command "ls /home/tidb/deploy/tikv-cdc-8600/log/tikv.log" tiup-cluster --yes start $name ! 
tiup-cluster _test $name data tiup-cluster --yes destroy $name } function tikv_cdc_scale_test() { mkdir -p ~/.tiup/bin/ version="nightly" topo_name="tikv_cdc" test_tls=false while [[ $# -gt 0 ]]; do case $1 in --version) version="$2" shift shift ;; --topo) topo_name="$2" shift shift ;; --tls) test_tls=true shift ;; esac done name=test_tikv_cdc_scale_$RANDOM if [ $test_tls = true ]; then topo=./topo/${topo_name}_tls.yaml else topo=./topo/${topo_name}.yaml fi tiup-cluster --yes deploy $name $version $topo tiup-cluster --yes start $name tiup-cluster _test $name writable tiup-cluster display $name total_sub_one=13 total=14 total_add_one=15 echo -e "\033[0;36m Start scale in tikv-cdc (-n3) \033[0m" yes | tiup-cluster scale-in $name -N n3:8600 wait_instance_num_reach $name $total_sub_one false echo -e "\033[0;36m Start scale out tikv-cdc (+n5) \033[0m" mkdir -p /tmp/topo topo=/tmp/topo/tikv_cdc_scale_in.yaml cat < $topo kvcdc_servers: - host: n5 EOF yes | tiup-cluster scale-out $name $topo wait_instance_num_reach $name $total false echo -e "\033[0;36m Scale out another tikv-cdc on n5 to verify port conflict detection \033[0m" cat < $topo kvcdc_servers: - host: n5 data_dir: "/home/tidb/tikv_cdc_data_1" EOF # should fail with message "Error: port conflict for '8600' between 'kvcdc_servers:n5.port' and 'kvcdc_servers:n5.port'" ! yes | tiup-cluster scale-out $name $topo # should fail echo -e "\033[0;36m Scale out another tikv-cdc on n5 with different port & data_dir \033[0m" cat < $topo kvcdc_servers: - host: n5 port: 8666 data_dir: "/home/tidb/tikv_cdc_data_1" EOF yes | tiup-cluster scale-out $name $topo wait_instance_num_reach $name $total_add_one false # scale in n4, as n4 should be the owner. 
echo -e "\033[0;36m Start scale in tikv-cdc (-n4) \033[0m" yes | tiup-cluster scale-in $name -N n4:8600 wait_instance_num_reach $name $total false tiup-cluster _test $name writable tiup-cluster --yes destroy $name } tiup-1.16.3/tests/tiup-cluster/script/upgrade.sh000077500000000000000000000026551505422223000216620ustar00rootroot00000000000000#!/bin/bash set -eu function upgrade() { mkdir -p ~/.tiup/bin/ old_version=$1 version=$2 test_tls=$3 name=test_upgrade_$RANDOM if [ $test_tls = true ]; then topo=./topo/upgrade_tls.yaml else topo=./topo/upgrade.yaml fi mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/ yes | tiup-cluster deploy $name $old_version $topo -i ~/.ssh/id_rsa yes | tiup-cluster start $name # ENV_LABELS_ENV will be replaced only if the rule_dir is not specified. if [ $test_tls = true ]; then tiup-cluster exec $name -N n1 --command "grep -q ${name} /home/tidb/deploy/prometheus-9090/conf/*.rules.yml" ! tiup-cluster exec $name -N n1 --command "grep -q ENV_LABELS_ENV /home/tidb/deploy/prometheus-9090/conf/*.rules.yml" fi tiup-cluster _test $name writable yes | tiup-cluster upgrade $name $version --transfer-timeout 60 tiup-cluster _test $name writable # test edit-config & reload # change the config of pump and check it after reload # https://stackoverflow.com/questions/5978108/open-vim-from-within-bash-shell-script EDITOR=ex tiup-cluster edit-config -y $name < # get the instance number of the cluster # filter the output of the go test # PASS # coverage: 12.7% of statements in github.com/pingcap/tiup/components/cluster/... function instance_num() { name=$1 native_ssh=$2 client="" if [ $native_ssh == true ]; then client="--ssh=system" fi count=$(tiup-cluster $client display $name | grep "Total nodes" | awk -F ' ' '{print $3}') echo $count } # wait_instance_num_reach # wait the instance number of cluster reach the target_num. 
# timeout 120 second function wait_instance_num_reach() { name=$1 target_num=$2 native_ssh=$3 client="" if [ $native_ssh == true ]; then client="--ssh=system" fi for ((i=0;i<120;i++)) do tiup-cluster $client prune $name --yes count=$(instance_num $name $native_ssh) if [ "$count" == "$target_num" ]; then echo "instance number reach $target_num" return else sleep 1 fi sleep 1 done echo "fail to wait instance number reach $target_num, count $count, retry num: $i" tiup-cluster $client display $name exit -1 } tiup-1.16.3/tests/tiup-cluster/test_cmd.sh000077500000000000000000000002151505422223000205170ustar00rootroot00000000000000#!/bin/bash set -eu source script/cmd_subtest.sh echo "test cluster for version v6.2.0 wo/ TLS, via easy ssh" cmd_subtest --version 6.2.0 tiup-1.16.3/tests/tiup-cluster/test_cmd_tls_native_ssh.sh000077500000000000000000000004271505422223000236310ustar00rootroot00000000000000#!/bin/bash set -eu source script/cmd_subtest.sh export GO_FAILPOINTS='github.com/pingcap/tiup/pkg/cluster/executor/assertNativeSSH=return(true)' echo "test cluster for version v6.0.0 w/ TLS, via native ssh" cmd_subtest --version 6.0.0 --tls --native-ssh unset GO_FAILPOINTS tiup-1.16.3/tests/tiup-cluster/test_scale_core.sh000077500000000000000000000002541505422223000220560ustar00rootroot00000000000000#!/bin/bash set -eu source script/scale_core.sh echo "test scaling of core components in cluster for version v6.2.0 wo/ TLS, via easy ssh" scale_core v6.2.0 false false tiup-1.16.3/tests/tiup-cluster/test_scale_core_tls.sh000077500000000000000000000002531505422223000227370ustar00rootroot00000000000000#!/bin/bash set -eu source script/scale_core.sh echo "test scaling of core components in cluster for version v5.3.0 w/ TLS, via easy ssh" scale_core v4.0.12 true false tiup-1.16.3/tests/tiup-cluster/test_scale_tiproxy.sh000066400000000000000000000002451505422223000226410ustar00rootroot00000000000000#!/bin/bash set -eu source script/scale_tiproxy.sh echo "test scaling of tidb and 
tiproxy in cluster for version v8.2.0, via easy ssh" scale_tiproxy v8.2.0 false tiup-1.16.3/tests/tiup-cluster/test_scale_tools.sh000077500000000000000000000002511505422223000222630ustar00rootroot00000000000000#!/bin/bash set -eu source script/scale_tools.sh echo "test scaling of tools components in cluster for version v4.0.12, via easy ssh" scale_tools v4.0.12 false false tiup-1.16.3/tests/tiup-cluster/test_scale_tools_tls.sh000077500000000000000000000002551505422223000231510ustar00rootroot00000000000000#!/bin/bash set -eu source script/scale_tools.sh echo "test scaling of tools components in cluster for version v5.3.0 w/ TLS, via easy ssh" scale_tools v6.0.0 true false tiup-1.16.3/tests/tiup-cluster/test_tikv_cdc.sh000077500000000000000000000010051505422223000215400ustar00rootroot00000000000000#!/bin/bash set -eu source script/tikv_cdc.sh # TODO: test tls after TLS is supported. # TODO: test upgrade of TiKV-CDC (there is only one release version now) ############################################### echo -e "\033[0;36m<<< Test specified cases for TiKV-CDC >>>\033[0m" tikv_cdc_test --version 6.2.0 --topo tikv_cdc --tikv-cdc-patch 1.0.0 ############################################### echo -e "\033[0;36m<<< Test scale in/out for TiKV-CDC >>>\033[0m" tikv_cdc_scale_test --version 6.2.0 --topo tikv_cdc tiup-1.16.3/tests/tiup-cluster/test_upgrade.sh000077500000000000000000000003311505422223000214020ustar00rootroot00000000000000#!/bin/bash set -eu old_version=${old_version-v3.0.20} version=${version-v4.0.12} source script/upgrade.sh echo "test upgrade cluster version from $old_version to $version" upgrade "$old_version" "$version" false tiup-1.16.3/tests/tiup-cluster/test_upgrade_tls.sh000077500000000000000000000003271505422223000222710ustar00rootroot00000000000000#!/bin/bash set -eu old_version=${old_version-v4.0.15} version=${version-v6.0.0} source script/upgrade.sh echo "test upgrade cluster version from $old_version to $version" upgrade "$old_version" "$version" true 
tiup-1.16.3/tests/tiup-cluster/topo/000077500000000000000000000000001505422223000173415ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-cluster/topo/full.yaml000066400000000000000000000025351505422223000211740ustar00rootroot00000000000000global: user: tidb group: pingcap server_configs: tidb: binlog.enable: true binlog.ignore-error: false tikv: storage.reserve-space: 5M pump: storage.stop-write-at-available-space: 1 mib tidb_servers: - host: n1 - host: n2 pd_servers: - host: n3 - host: n4 - host: n5 # Note if only 3 instance, when scale-in one of it. # It may not be tombstone. tikv_servers: - host: n1 - host: n3 data_dir: "/home/tidb/my_kv_data" - host: n4 - host: n5 # tiflash eat too much memory # and binary is more than 1G.. tiflash_servers: - host: n3 data_dir: "data1,/data/tiflash-data" # - host: n4 # - host: n5 # tiproxy_servers: - host: n1 pump_servers: - host: n3 - host: n4 - host: n5 drainer_servers: - host: n1 data_dir: /home/tidb/data/drainer-8249/data commit_ts: -1 config: syncer.db-type: "file" cdc_servers: - host: n3 - host: n4 - host: n5 kvcdc_servers: - host: n3 - host: n4 data_dir: "/home/tidb/tikv_cdc_data" tispark_masters: - host: n3 tispark_workers: - host: n4 monitoring_servers: - host: n1 rule_dir: /tmp/local/prometheus grafana_servers: - host: n1 dashboard_dir: /tmp/local/grafana alertmanager_servers: - host: n1 config_file: /tmp/local/alertmanager/alertmanager.yml monitored: node_exporter_port: 9100 blackbox_exporter_port: 9115 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_cdc.yaml000066400000000000000000000000321505422223000236300ustar00rootroot00000000000000cdc_servers: - host: n3 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_grafana.yaml000066400000000000000000000000361505422223000245020ustar00rootroot00000000000000grafana_servers: - host: n1 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_pd.yaml000066400000000000000000000000311505422223000235010ustar00rootroot00000000000000pd_servers: - host: n3 
tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_prometheus.yaml000066400000000000000000000001051505422223000252730ustar00rootroot00000000000000monitoring_servers: - host: n2 rule_dir: /tmp/local/prometheus tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_pump.yaml000066400000000000000000000000331505422223000240610ustar00rootroot00000000000000pump_servers: - host: n3 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_tidb.yaml000066400000000000000000000000331505422223000240220ustar00rootroot00000000000000tidb_servers: - host: n1 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_tidb_2nd.yaml000066400000000000000000000000331505422223000245650ustar00rootroot00000000000000tidb_servers: - host: n2 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_tiflash.yaml000066400000000000000000000001071505422223000245340ustar00rootroot00000000000000tiflash_servers: - host: n3 data_dir: "data1,/data/tiflash-data" tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_tikv.yaml000066400000000000000000000000331505422223000240550ustar00rootroot00000000000000tikv_servers: - host: n3 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_tiproxy.yaml000066400000000000000000000000361505422223000246210ustar00rootroot00000000000000tiproxy_servers: - host: n2 tiup-1.16.3/tests/tiup-cluster/topo/full_scale_in_tispark.yaml000066400000000000000000000000361505422223000245600ustar00rootroot00000000000000tispark_workers: - host: n5 tiup-1.16.3/tests/tiup-cluster/topo/full_tls.yaml000066400000000000000000000017701505422223000220560ustar00rootroot00000000000000global: user: tidb group: pingcap enable_tls: true server_configs: tidb: binlog.enable: true binlog.ignore-error: false tikv: storage.reserve-space: 5M pump: storage.stop-write-at-available-space: 1 mib tidb_servers: - host: n1 - host: n2 pd_servers: - host: n3 - host: n4 - host: n5 # Note if only 3 instance, when scale-in one of it. # It may not be tombstone. 
tikv_servers: - host: n1 - host: n3 data_dir: "/home/tidb/my_kv_data" - host: n4 - host: n5 pump_servers: - host: n3 - host: n4 - host: n5 tiproxy_servers: - host: n1 drainer_servers: - host: n1 data_dir: /home/tidb/data/drainer-8249/data commit_ts: -1 config: syncer.db-type: "file" cdc_servers: - host: n3 - host: n4 - host: n5 monitoring_servers: - host: n1 rule_dir: /tmp/local/prometheus grafana_servers: - host: n1 dashboard_dir: /tmp/local/grafana alertmanager_servers: - host: n1 config_file: /tmp/local/alertmanager/alertmanager.yml tiup-1.16.3/tests/tiup-cluster/topo/full_without_tiflash.yaml000066400000000000000000000020431505422223000244630ustar00rootroot00000000000000global: user: tidb group: pingcap server_configs: tidb: binlog.enable: true binlog.ignore-error: false tikv: storage.reserve-space: 5M pump: storage.stop-write-at-available-space: 1 mib tidb_servers: - host: n1 - host: n2 pd_servers: - host: n3 - host: n4 - host: n5 # Note if only 3 instance, when scale-in one of it. # It may not be tombstone. 
tikv_servers: - host: n1 - host: n3 data_dir: "/home/tidb/my_kv_data" - host: n4 - host: n5 pump_servers: - host: n3 - host: n4 - host: n5 drainer_servers: - host: n1 data_dir: /home/tidb/data/drainer-8249/data commit_ts: -1 config: syncer.db-type: "file" tiproxy_servers: - host: n1 cdc_servers: - host: n3 - host: n4 - host: n5 tispark_masters: - host: n3 tispark_workers: - host: n4 monitoring_servers: - host: n1 rule_dir: /tmp/local/prometheus grafana_servers: - host: n1 dashboard_dir: /tmp/local/grafana alertmanager_servers: - host: n1 config_file: /tmp/local/alertmanager/alertmanager.yml tiup-1.16.3/tests/tiup-cluster/topo/tikv_cdc.yaml000066400000000000000000000013631505422223000220160ustar00rootroot00000000000000global: user: tidb group: pingcap server_configs: tikv: storage.reserve-space: 5M storage.api-version: 2 storage.enable-ttl: true kvcdc: gc-ttl: 43200 tidb_servers: - host: n1 - host: n2 pd_servers: - host: n3 - host: n4 - host: n5 tikv_servers: - host: n1 - host: n3 data_dir: "/home/tidb/my_kv_data" - host: n4 - host: n5 kvcdc_servers: - host: n3 - host: n4 data_dir: "/home/tidb/tikv_cdc_data" monitoring_servers: - host: n1 rule_dir: /tmp/local/prometheus grafana_servers: - host: n1 dashboard_dir: /tmp/local/grafana alertmanager_servers: - host: n1 config_file: /tmp/local/alertmanager/alertmanager.yml monitored: node_exporter_port: 9100 blackbox_exporter_port: 9115 tiup-1.16.3/tests/tiup-cluster/topo/tiproxy.yaml000066400000000000000000000003771505422223000217520ustar00rootroot00000000000000global: user: tidb group: pingcap component_versions: tiproxy: v1.2.0 tidb_servers: - host: n1 - host: n2 pd_servers: - host: n3 - host: n4 - host: n5 tikv_servers: - host: n3 - host: n4 - host: n5 tiproxy_servers: - host: n1 tiup-1.16.3/tests/tiup-cluster/topo/upgrade.yaml000066400000000000000000000017021505422223000216540ustar00rootroot00000000000000server_configs: tidb: binlog.enable: true binlog.ignore-error: false tikv: storage.reserve-space: 5M pump: 
storage.stop-write-at-available-space: 1 mib tidb_servers: - host: n1 - host: n2 pd_servers: - host: n3 - host: n4 - host: n5 # Note if only 3 instance, when scale-in one of it. # It may not be tombstone. tikv_servers: - host: n2 - host: n3 - host: n4 - host: n5 # tiflash eat too much memory # and binary is more than 1G.. # tiflash_servers: # - host: n3 # - host: n4 # - host: n5 pump_servers: - host: n3 - host: n4 - host: n5 drainer_servers: - host: n1 data_dir: /home/tidb/data/drainer-8249/data commit_ts: -1 config: syncer.db-type: "file" monitoring_servers: - host: n1 rule_dir: /tmp/local/prometheus grafana_servers: - host: n1 dashboard_dir: /tmp/local/grafana alertmanager_servers: - host: n1 config_file: /tmp/local/alertmanager/alertmanager.yml tiup-1.16.3/tests/tiup-cluster/topo/upgrade_tls.yaml000066400000000000000000000013341505422223000225370ustar00rootroot00000000000000global: enable_tls: true server_configs: tidb: binlog.enable: true binlog.ignore-error: false tikv: storage.reserve-space: 5M pump: storage.stop-write-at-available-space: 1 mib tidb_servers: - host: n1 - host: n2 pd_servers: - host: n3 - host: n4 - host: n5 # Note if only 3 instance, when scale-in one of it. # It may not be tombstone. 
tikv_servers: - host: n2 - host: n3 - host: n4 - host: n5 pump_servers: - host: n3 - host: n4 - host: n5 drainer_servers: - host: n1 data_dir: /home/tidb/data/drainer-8249/data commit_ts: -1 config: syncer.db-type: "file" monitoring_servers: - host: n1 grafana_servers: - host: n1 alertmanager_servers: - host: n1 tiup-1.16.3/tests/tiup-cluster/topology.yaml000066400000000000000000000004731505422223000211240ustar00rootroot00000000000000tidb_servers: - host: 172.16.5.138 - host: 172.16.4.190 - host: 172.16.4.102 pd_servers: - host: 172.16.4.190 data_dir: "/data" - host: 172.16.5.158 - host: 172.16.5.53 tikv_servers: - host: 172.16.5.138 - host: 172.16.4.190 - host: 172.16.5.158 - host: 172.16.5.53 - host: 172.16.4.102 tiup-1.16.3/tests/tiup-dm/000077500000000000000000000000001505422223000152775ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/ansible_data/000077500000000000000000000000001505422223000177055ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/ansible_data/.gitignore000066400000000000000000000000301505422223000216660ustar00rootroot00000000000000hosts.ini inventory.ini tiup-1.16.3/tests/tiup-dm/ansible_data/hosts.ini.tpl000066400000000000000000000001731505422223000223450ustar00rootroot00000000000000[servers] __IPPREFIX__.101 __IPPREFIX__.102 __IPPREFIX__.103 __IPPREFIX__.104 __IPPREFIX__.105 [all:vars] username = tidb tiup-1.16.3/tests/tiup-dm/ansible_data/inventory.ini.tpl000066400000000000000000000021321505422223000232370ustar00rootroot00000000000000## DM modules [dm_master_servers] dm_master ansible_host=__IPPREFIX__.101 [dm_worker_servers] dm-worker1 ansible_host=__IPPREFIX__.101 server_id=101 source_id="mysql-replica-01" mysql_host=mysql1 mysql_user=root mysql_password='' mysql_port=3306 dm-worker2 ansible_host=__IPPREFIX__.102 server_id=102 source_id="mysql-replica-02" mysql_host=mysql2 mysql_user=root mysql_password='' mysql_port=3306 [dm_portal_servers] dm_portal ansible_host=__IPPREFIX__.101 ## Monitoring modules [prometheus_servers] 
prometheus ansible_host=__IPPREFIX__.101 [grafana_servers] ; grafana ansible_host=__IPPREFIX__.101 ; change to add specified port for test, ref: https://docs.pingcap.com/zh/tidb-data-migration/dev/deploy-a-dm-cluster-using-ansible#%E9%BB%98%E8%AE%A4%E6%9C%8D%E5%8A%A1%E7%AB%AF%E5%8F%A3 grafana ansible_host=__IPPREFIX__.101 grafana_port=3001 [alertmanager_servers] alertmanager ansible_host=__IPPREFIX__.101 ## Global variables [all:vars] cluster_name = test-cluster ansible_user = tidb dm_version = v1.0.6 deploy_dir = /home/tidb/deploy grafana_admin_user = "admin" grafana_admin_password = "admin" tiup-1.16.3/tests/tiup-dm/cover/000077500000000000000000000000001505422223000164155ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/cover/.gitkeep000066400000000000000000000000001505422223000200340ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/local/000077500000000000000000000000001505422223000163715ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/local/alertmanager/000077500000000000000000000000001505422223000210335ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/local/alertmanager/alertmanager.yml000066400000000000000000000037731505422223000242320ustar00rootroot00000000000000# this is a test message: magic-string-for-test global: # The smarthost and SMTP sender used for mail notifications. smtp_smarthost: "localhost:25" smtp_from: "alertmanager@example.org" smtp_auth_username: "alertmanager" smtp_auth_password: "password" # smtp_require_tls: true # The Slack webhook URL. # slack_api_url: '' route: # A default receiver receiver: "db-alert-email" # The labels by which incoming alerts are grouped together. For example, # multiple alerts coming in for cluster=A and alertname=LatencyHigh would # be batched into a single group. group_by: ["env", "instance", "alertname", "type", "group", "job"] # When a new group of alerts is created by an incoming alert, wait at # least 'group_wait' to send the initial notification. 
# This way ensures that you get multiple alerts for the same group that start # firing shortly after another are batched together on the first # notification. group_wait: 30s # When the first notification was sent, wait 'group_interval' to send a batch # of new alerts that started firing for that group. group_interval: 3m # If an alert has successfully been sent, wait 'repeat_interval' to # resend them. repeat_interval: 3m routes: # - match: # receiver: webhook-kafka-adapter # continue: true # - match: # env: test-cluster # receiver: db-alert-slack # - match: # env: test-cluster # receiver: db-alert-email receivers: # - name: 'webhook-kafka-adapter' # webhook_configs: # - send_resolved: true # url: 'http://10.0.3.6:28082/v1/alertmanager' #- name: 'db-alert-slack' # slack_configs: # - channel: '#alerts' # username: 'db-alert' # icon_emoji: ':bell:' # title: '{{ .CommonLabels.alertname }}' # text: '{{ .CommonAnnotations.summary }} {{ .CommonAnnotations.description }} expr: {{ .CommonLabels.expr }} http://172.0.0.1:9093/#/alerts' - name: "db-alert-email" email_configs: - send_resolved: true to: "xxx@xxx.com" tiup-1.16.3/tests/tiup-dm/local/grafana/000077500000000000000000000000001505422223000177705ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/local/grafana/dm.json000066400000000000000000000067761505422223000213030ustar00rootroot00000000000000{ "__inputs": [ { "name": "test", "label": "test", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "6.1.6" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" }, { "type": "panel", "id": "singlestat", "name": "Singlestat", "version": "" } ], "annotations": { "list": [ { "builtIn": 1, "datasource": "test", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": 
"Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "id": null, "iteration": 1582881408361, "links": [], "panels": [ { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 53, "panels": [], "repeat": null, "title": "overview (magic-string-for-test)", "type": "row" } ], "refresh": "30s", "schemaVersion": 18, "style": "dark", "tags": [], "templating": { "list": [ { "allValue": null, "current": {}, "datasource": "test", "definition": "", "hide": 0, "includeAll": false, "label": null, "multi": false, "name": "task", "options": [], "query": "label_values(dm_worker_task_state, task)", "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "allValue": null, "current": {}, "datasource": "test", "definition": "", "hide": 0, "includeAll": false, "label": null, "multi": true, "name": "source", "options": [], "query": "label_values(dm_worker_task_state, source_id)", "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "allValue": null, "current": {}, "datasource": "test", "definition": "", "hide": 0, "includeAll": false, "label": null, "multi": true, "name": "instance", "options": [], "query": "label_values(dm_worker_task_state{task=\"$task\"}, instance)", "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false } ] }, "time": { "from": "now-1h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "", "title": "test-DM-task (magic-string-for-test)", "uid": "wkbFDNlZz", "version": 1 
}tiup-1.16.3/tests/tiup-dm/local/grafana/dm_instances.json000066400000000000000000000336121505422223000233370ustar00rootroot00000000000000{ "__inputs": [ { "name": "test", "label": "test", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "6.1.6" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "1.0.0" }, { "type": "panel", "id": "singlestat", "name": "Singlestat", "version": "" } ], "annotations": { "list": [ { "builtIn": 1, "datasource": "test", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "id": null, "iteration": 1582881283427, "links": [], "panels": [ { "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 }, "id": 17, "panels": [ { "cacheTimeout": null, "colorBackground": false, "colorValue": false, "colors": [ "#299c46", "rgba(237, 129, 40, 0.89)", "#d44a3a" ], "datasource": "test", "description": "The storage capacity of the disk occupied by the relay log", "format": "bytes", "gauge": { "maxValue": 100, "minValue": 0, "show": false, "thresholdLabels": false, "thresholdMarkers": true }, "gridPos": { "h": 7, "w": 6, "x": 0, "y": 1 }, "id": 1, "interval": null, "links": [], "mappingType": 1, "mappingTypes": [ { "name": "value to text", "value": 1 }, { "name": "range to text", "value": 2 } ], "maxDataPoints": 100, "nullPointMode": "connected", "nullText": null, "postfix": "", "postfixFontSize": "50%", "prefix": "", "prefixFontSize": "50%", "rangeMaps": [ { "from": "null", "text": "N/A", "to": "null" } ], "sparkline": { "fillColor": "rgba(31, 118, 189, 0.18)", "full": false, "lineColor": "rgb(31, 120, 193)", "show": false }, "tableColumn": "", "targets": [ { "expr": 
"dm_relay_space{instance=\"$instance\", type=\"capacity\"}", "format": "time_series", "intervalFactor": 2, "refId": "A" } ], "thresholds": "", "title": "storage capacity", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ { "op": "=", "text": "N/A", "value": "null" } ], "valueName": "current" } ], "repeat": null, "title": "relay log", "type": "row" }, { "collapsed": true, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 1 }, "id": 18, "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "The current state of subtasks in the instance.\n\n0: Invalid\n\n1: New\n\n2: Running\n\n3: Paused\n\n4: Stopped\n\n5: Finished", "fill": 1, "gridPos": { "h": 7, "w": 6, "x": 0, "y": 2 }, "id": 13, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "dm_worker_task_state{instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "task state", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "transparent": true, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 0, "format": "short", "label": null, "logBase": 1, "max": "5", "min": "0", "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "The data import process percentage of Loader. 
The value range is 0% ~ 100%", "fill": 1, "gridPos": { "h": 7, "w": 6, "x": 6, "y": 2 }, "id": 14, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "dm_loader_progress{instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "load progress", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "transparent": true, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": null, "format": "percentunit", "label": null, "logBase": 1, "max": "1", "min": "0", "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "The number of binlog files in Syncer that are behind the master", "fill": 1, "gridPos": { "h": 7, "w": 6, "x": 12, "y": 2 }, "id": 15, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "dm_syncer_binlog_file{instance=\"$instance\", node=\"master\"} - ON(instance, task, job) dm_syncer_binlog_file{instance=\"$instance\", node=\"syncer\"}", "format": "time_series", "intervalFactor": 2, "refId": "A" } ], "thresholds": [], 
"timeFrom": null, "timeRegions": [], "timeShift": null, "title": "binlog file gap between master and syncer", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "transparent": true, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 0, "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "test", "description": "Is waiting shard DDL lock to be resolved, >0 means waiting", "fill": 1, "gridPos": { "h": 7, "w": 6, "x": 18, "y": 2 }, "id": 16, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": false, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "dm_syncer_shard_lock_resolving{instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "shard lock resolving", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "transparent": true, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 0, "format": "short", "label": null, "logBase": 1, "max": "1", "min": "0", "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": false } ], "yaxis": { "align": false, "alignLevel": null } } ], "repeat": null, "title": "task", "type": "row" } ], "refresh": "30s", "schemaVersion": 18, "style": "dark", "tags": [], 
"templating": { "list": [ { "allFormat": "glob", "allValue": null, "current": {}, "datasource": "test", "definition": "", "hide": 0, "includeAll": false, "label": null, "multi": false, "name": "instance", "options": [], "query": "label_values(dm_relay_space, instance)", "refresh": 1, "regex": "", "skipUrlSync": false, "sort": 1, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false } ] }, "time": { "from": "now-1h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "", "title": "test-DM-worker-instances (magic-string-for-test)", "uid": "le1FDN_Wz", "version": 1 }tiup-1.16.3/tests/tiup-dm/local/prometheus/000077500000000000000000000000001505422223000205645ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/local/prometheus/dm_worker.rules.yml000066400000000000000000000010171505422223000244300ustar00rootroot00000000000000# magic-string-for-test groups: - name: alert.rules rules: - alert: DM_remain_storage_of_relay_log expr: dm_relay_space{type="available"} < 10*1024*1024*1024 labels: env: ENV_LABELS_ENV level: critical expr: dm_relay_space{type="available"} < 10*1024*1024*1024 annotations: description: "cluster: ENV_LABELS_ENV, instance: {{ $labels.instance }}, values: {{ $value }}" value: "{{ $value }}" summary: DM remain storage of relay log 
tiup-1.16.3/tests/tiup-dm/root.json000066400000000000000000000161531505422223000171630ustar00rootroot00000000000000{"signatures":[{"keyid":"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002","sig":"kmRKAh18iBSxTU20AlqCFYV5w4OgK72RGqN5sGkDy5I+xxKlMgq2xvlWH329bpRdtmajd6wozb6ibEhiijWei7DnQCkJPVw1Y5DdKyRAHjM8ZYeawVNaUCh2VH13is6cbzSPp30CKZ5whtuERAgkLsX/M0E6bterzhQR//PTNfO41/NOgXnyqGgOslzM46N86rCBPcQwHcyHV0voXX1zGpCSDLkODEbi6ILweJl0iNnaBF+lBzvW6N+oqmEUUzbH7tLSCsJVyj3tLSlPvbTO8BoWnkN1k67WNkBBfA9vD0s46j2hWGuTMApiUa5Iou6/N0JI9Sb2kZAle15TJQJvEA=="},{"keyid":"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae","sig":"YNGnO9+vcnfwI0ELBdJZh1KEu62NIPZcV2TKfxEBaj7g/Fm1HO3ReAH36dew9MytDPRuIdCXYHVSOQfiBQCuJCF7k/hRQwEFlrJWHMuN2rz7YLqTUyBzkE/PbAEOTtl4C/Q8bxHZ6tHdOJAUIBsXTy4yRZVempUtIULjPNh2d6BPt7x61+3RwhhrPHKFxy2I1hVaZIMfO94Ofb4iwd0UJ1YuQdDqIP75+YtoYUsBFKee4AqvRgKrp5rqQvn0CGOOreOIXvTsszvuzxR26kWvOPPkS3zJRwTu7TW0pE0AsUgEKHKGDoShNdcmx+hGz8mn8BpAivxvJhqqJsz9nazkZA=="},{"keyid":"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a","sig":"X2FykukrMhf2qeK4bgOTy272VMFJeHAQYNGayckbjfdfToTfnRt+mwV1x5jUFTq39XsEPC6wgTs3ZWQ/3DJ1Wv7oyytN6QTPvblcFnAHNdCNGU2QceyG1N9efRbnpL+VzaWVmpwqrV2DjSHNRYMJsCfwbuC9ZCZ4HFL+bv/InZnM7Zxg7Yrl3Vzs6gOYnlZr19vgQOW/n7CCYgE2X9iS+Y3/8ALFtgU+CJEjAXK37N769H+kYG+IuuhSobdhBk/ie+oXTxm/Y5BjNBvBqetUVpCdqw/PpJ+vpN7KBGGVg3ij10wA2a1B+CA1dxjF6Tzg/HAnNRidZrbmWrkc8I69IA=="},{"keyid":"d499c8ca3c2018885037da89bdfa327e21a0e6a15e2ca8dabe5e78a3edf9c91c","sig":"wo6iQdse3ADk4SirmdjCdjOnm9sg6ztKEAFfi2BRZE1Z8FlnzUp8S+OztmgRqsVVfGEDAwgWJeimRjohznKBz0tS3bZjzLdHllw0ZHv1n+i8vaBVA5uvpZuzjpCdQbNEh4o9yrVv7hqyFVva5LEMJelOkWV6TvdgdPztHC7B7neyNm7I/0QI3mG8HHFen/CX2CHJu6OQ29XHjbe79VnTbo2ujK9b3ZIHivRMpND0/DBKxSWuXvQ2BSC1nYNDBoJ2AneOQ+aUM6WMUQst8ct9pGlZszyZX/8A1Pbj/e1ToPgkEDSqg2w+Bxty0allc4nyaijzBDo6SoKoWfXKi9wq0Q=="},{"keyid":"70033289cef8e5105914b22ecc7f15e271d0650d5216d1ab0bd67f8440411bb6","sig":"OoK5e9uZ6xu4b6WQjbJge2hsJR10DJ70vEsFRY26C52m5WOrhOJeDxthy5wF01P3odB
5hWAAKYDRzB+wUy/h05O33ZXxYrbtqDkwkBYOPRr0X24MDz2nD+etOXyed69V77xi62JYSbl9aP5ItKDGrrYDkOzkQcP2q87eaVJxRJEm0qrA4Jtz8oZ/nOZwX9vZRmd/qNorye/On8yxbtueqCkTVKULI0c/CwBw/rAr+THa1alPqn24djS3MqBst+DDRWR3996M9Cz5agWBASo6TSXNoUnIOGyza7zqoNsdrePLHCbInUWuJ+lN2ilGTgl6ERyYoc0+QFmRaVljG5pGcw=="}],"signed":{"_type":"root","expires":"2024-07-26T11:18:30+08:00","roles":{"index":{"keys":{"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn5kVA7MlBfSe7EBaExjl\nKbwoDkn1aYi74s29mFgtRo8nejbrVvZQMCIUhvKc0pFa/l9JD/QY6/nAOCE1lpzi\nwwNkSntfOo3p3HQIR+Ut7hZ4Sxfe/5JagGo3LQ+Hd3EJWUxyEfQ/Bff07F3XAbqM\n5+cKNrdsKWZJcPiJDW621qGwCx52f+gzl9bnFe4/hx34OUgirwqh5DS+LhIO+/yt\nbOiN1AyjQKlnb8lUnblElS4Njd+F4io5VzSrZYi2+4AbTkO6wLwbsWHMzXfv9qwn\nvllufOHpB6EwiQ/xBOMuvJJymHnZvs8AH4SuydQIXLaJuv1ysFaBs0KB/ktbakSK\nLwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/index.json"},"root":{"keys":{"5607181203a2fb60b9d725109388ccb19ccdc236a4b1d1441fbea7ad07616c4a":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyDwCfCl30vhyJW7fB1bs\npRYKtBKzl7o0qnJTm+IksjQ8RXxj8osUpMLmSvOzCaJ5Wxe+Pm1LpSTDbbubbgvd\nnmEFL6228sifviNIu2HlIl+agfzmXuJ9OBlzGUaI4gAd1Z6pF6+mjlcjz2PbWF84\nAbXZdK49uluqulp7HrGB/qNjGcIRUCHgDU4nnq0OkI1BZZSKm9ovonqDkIK76x/S\niAD9OjKsjQ/s57tE+5WTVObKpfrfK0JeHdpAUsA/2n4L1Z6FmZD4LZWqb0i+C7xj\nMElC99KtjlwRntcjeVWG9YjU8AcEN0n1gON9S2oRdyyAzDTgGb7WueDnn6qstt5w\nSQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"70033289cef8e5105914b22ecc7f15e271d0650d5216d1ab0bd67f8440411bb6":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApU5RHP0K+Fgkzm9L0yHR\n4CEqLLwHr7hQxjWOaq5K0UfaOKcsPQZ1SkJ/AMppz7ovzwOU4hcy0wJOV7ms6ACk\nS3hte2GlH/xp+OzWiRnI4qJ6GRrAe+ototj1ZMGvpLK4ifxkKaY6vuWFFAeS0fSe\nPHUGAl5v+PaJWgDNQTRmuAu5oCaYP6oT6VKHj6ulLAgAOqWsBSJiK3oIRcWPR+uI\nIW/9BV158wfmxAw1+7ch1RD44+1vV3+Eo94alvVZIAfcJqDS3XGr2Hfd/YWGj1d2\nD26eblBJoQt0L2E2EL8igu1sudVkMZ3NAIfmBrOWUxHEbIjYeKvXPbaSGdC+FoXD\nrwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOgQkwLOh31QV9OpbO9v\n6o83durJFGPOnVXZiab83pKaSk7HEK9WzXBq0BaPvtFwSfROVdpgtopri5lZi+uH\naMKLUn5F8XRnSMl/7m5vM4XpZZYa4aQId4TWdbFtTu31eHGZ3eEC5nDRJ5NhZOJd\nKLFBu/xmxrh/eNZt4QbdWLZayjHnzyoy5AnfNTR6nJgPAv+rBOqyqT/r14q4Pngh\n3z0I3pNFr5qmxsp013XV+kgOW1F7zT7IMU8xRIgo85UWUNhax0/bjY/2NI1Z+WjR\nyhZmUBMVYWvfw97xDUrvBvrJxZPgg0lGvxJC6LF2dM7wgLaNx9khT6HMBVxjxLMs\nDQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"a61b695e2b86097d993e94e99fd15ec6d8fc8e9522948c9ff21c2f2c881093ae":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnayxhw6KeoKK+Ax9RW6v\n66YjrpRpGLewLmSSAzJGX8nL5/a2nEbXbeF9po265KcBSFWol8jLBsmG56ruwwxp\noWWhJPncqGqy8wMeRMmTf7ATGa+tk+To7UAQD0MYzt7rRlIdpqi9Us3J6076Z83k\n2sxFnX9sVflhOsotGWL7hmrn/CJWxKsO6OVCoqbIlnJV8xFazE2eCfaDTIEEEgnh\nLIGDsmv1AN8ImUIn/hyKcm1PfhDZrF5qhEVhfz5D8aX3cUcEJw8BvCaNloXyHf+y\nDKjqO/dJ7YFWVt7nPqOvaEkBQGMd54ETJ/BbO9r3WTsjXKleoPovBSQ/oOxApypb\nNQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"d499c8ca3c2018885037da89bdfa327e21a0e6a15e2ca8dabe5e78a3edf9c91c":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5oDytiywLDOSpIBovZxx\nNlZJg5Gk3O9kpiOQ0XnD+L2LV+a2dJU1KmBOoGCUr2TNaGTPihAStjpFIsW4c7Ye\nB2RjUFUrXRf3mvc3n4fACayenxtnCleSR4gKkAdHqqPCiWHT5TAtybKSHuHAluUL\nkMvavUZjIPMj0YYB0R8Re7BjU+zxnipJosTbbPQ7fa3+x2VAHc066Y9qp1YucdpB\nMZ3UwtSVNK7aCbFZvKPwAm22fnDYmMbYFeTz/rrl8k+rKTM37d4D3mURC9xDJxIP\nXVaU2dBImYjoFcY0/5oBU5vr1sj2sdUH+3G5AUr6iCL+XJLiwA1x24jKA6mUjQ93\ndwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":3,"url":"/root.json"},"snapshot":{"keys":{"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqTZx29eJR5EumjqM4YTb\nFlKbim1GNYmtbCLH51BbU2lt46ddmfGvtGsxTD3mIZ/GEHVFv6Aei3xx5nIfhGP0\nrG78JRz394uU8Pd62DiIFWYizr5o+ZBZu29D2YK5ZtxoLFpgt0ibnINK2NcesDC8\nSqfIUbMiQFT6yB/MYD275SjfRGHOeYTPmKdjMJrhLL2cfIPYnQ0QFYIyMvXBG1Fj\nU0rc9UclYQHh9YheIDVYI9YCo/DWP3KFfRJpoTjQRGoPSK9TXcpCAEzQpEG3jOek\n9PdV9Ol6/O8JbrFwXWF3LhkUThg+zCjV4qHtP4oqp5QCqzTQTXGQ9qxWUSlHi4Eu\nIwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/snapshot.json"},"timestamp":{"keys":{"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzTgV5iKhMnunUDxt4PB\npYqTMPaJN/ZdOOsP6cS3DeCE/EcYGfgCjvP7KD3gjG98VDBTVcuwZClSy+/zvHhV\nIq7VWu+yxQL5c6oa1xpCyHoA96JiLIDPhmqEdscdRybcRQ2CYywzKA8jSwEQCnEK\nc8a74ceY352l/MEcOem0+AtKrOjqcjbXCayDwC9yTg/c78bkp+4T8AhSWgt6Tlrt\nY8jLE7zwojFtIYtMwobWRIW2O3nJDXiSBbTPG3M9kF1G43INshSdBcuq5Tmy8lpE\n/XiG/E7+hP63Hm+KAcdvl553Zs7pLhAZxV0kqlApqRRwhscw+JQci8sVONun5t9t\nNwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/timestamp.json"}},"spec_version":"0.1.0","version":4}}tiup-1.16.3/tests/tiup-dm/run.sh000077500000000000000000000033141505422223000164430ustar00rootroot00000000000000#!/bin/bash set -eu # 
Change directory to the source directory of this script. Taken from: # https://stackoverflow.com/a/246128/3858681 pushd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # use run.sh --do-cases "test_cmd test_upgrade" to run specify cases do_cases="" while [[ $# -gt 0 ]] do key="$1" case $key in --native-ssh) echo "run using native ssh" export TIUP_NATIVE_SSH=true export GO_FAILPOINTS='github.com/pingcap/tiup/pkg/cluster/executor/assertNativeSSH=return(true)' shift # past argument ;; --do-cases) do_cases="$2" shift # past argument shift # past value ;; *) shift # esac done PATH=$PATH:/tiup-cluster/bin export TIUP_CLUSTER_PROGRESS_REFRESH_RATE=10s export TIUP_CLUSTER_EXECUTE_DEFAULT_TIMEOUT=300s export DEBUG_CHECKPOINT=1 export version=${version-nightly} # Prepare local config echo "preparing local config" ls -lh ./local rm -rf /tmp/local cp -r ./local /tmp/local ls -lh /tmp/local function tiup-dm() { mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/ # echo "in function" if [ -f "./bin/tiup-dm.test" ]; then ./bin/tiup-dm.test -test.coverprofile=./cover/cov.itest-$(date +'%s')-$RANDOM.out __DEVEL--i-heard-you-like-tests "$@" else ../../bin/tiup-dm "$@" fi } . ./script/util.sh if [ "$do_cases" == "" ]; then for script in ./test_*.sh; do echo "run test: $script" . $script done else for script in "${do_cases[@]}"; do echo "run test: $script.sh" . 
./$script.sh done fi echo -e "\033[0;36m<<< Run all test success >>>\033[0m" tiup-1.16.3/tests/tiup-dm/script/000077500000000000000000000000001505422223000166035ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/script/task/000077500000000000000000000000001505422223000175455ustar00rootroot00000000000000tiup-1.16.3/tests/tiup-dm/script/task/db1.prepare.sql000066400000000000000000000010471505422223000223730ustar00rootroot00000000000000drop database if exists `sharding1`; drop database if exists `sharding2`; create database `sharding1`; use `sharding1`; create table t1 (id bigint primary key, name varchar(80), info varchar(100)) DEFAULT CHARSET=utf8mb4; create table t2 (id bigint primary key, name varchar(80), info varchar(100)) DEFAULT CHARSET=utf8mb4; insert into t1 (id, name) values (10001, 'Gabriel García Márquez'), (10002, 'Cien años de soledad'); insert into t2 (id, name) values (20001, 'José Arcadio Buendía'), (20002, 'Úrsula Iguarán'), (20003, 'José Arcadio'); tiup-1.16.3/tests/tiup-dm/script/task/db2.prepare.sql000066400000000000000000000010431505422223000223700ustar00rootroot00000000000000drop database if exists `sharding1`; drop database if exists `sharding2`; create database `sharding1`; use `sharding1`; create table t2 (id bigint primary key, name varchar(80), info varchar(100)) DEFAULT CHARSET=utf8mb4; create table t3 (id bigint primary key, name varchar(80), info varchar(100)) DEFAULT CHARSET=utf8mb4; insert into t2 (id, name, info) values (40000, 'Remedios Moscote', '{}'); insert into t3 (id, name, info) values (30001, 'Aureliano José', '{}'), (30002, 'Santa Sofía de la Piedad', '{}'), (30003, '17 Aurelianos', NULL); tiup-1.16.3/tests/tiup-dm/script/task/run.sh000077500000000000000000000020621505422223000207100ustar00rootroot00000000000000#!/bin/bash # ./up.sh --dev --compose ./docker-compose.dm.yml # deploy a cluster using 'tests/tiup-dm/topo/full_dm.yaml' # run this script in control node to prepare some data and create the task. 
set -eu pushd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" tiup install dmctl:nightly ctl="tiup dmctl:nightly" wd=$(pwd) $ctl --master-addr n1:8261 operate-source create $wd/source1.yml $ctl --master-addr n1:8261 operate-source create $wd/source2.yml cat $wd/db1.prepare.sql | mysql -h mysql1 cat $wd/db2.prepare.sql | mysql -h mysql2 # the task will replicate data into db_target.t_target of tidb1:4000 echo "drop table if exists db_target.t_target;" | mysql -h tidb1 -P 4000 $ctl --master-addr n1:8261 start-task $wd/task.yaml # check data # should has 9 row in the prepare data, one for the header here. sleep 10 # wait to replicate line=$(echo "select * from db_target.t_target" | mysql -h tidb1 -P 4000 | wc -l) if [ $line = 10 ];then echo "replicate data success" else echo "fail to replicate data, line is $line" exit -1 fi tiup-1.16.3/tests/tiup-dm/script/task/source1.yml000066400000000000000000000002061505422223000216470ustar00rootroot00000000000000# MySQL1 Configuration. --- source-id: "mysql1" enable-gtid: true from: host: "mysql1" user: "root" password: "" port: 3306 tiup-1.16.3/tests/tiup-dm/script/task/source2.yml000066400000000000000000000002061505422223000216500ustar00rootroot00000000000000# MySQL2 Configuration. 
--- source-id: "mysql2" enable-gtid: true from: host: "mysql2" user: "root" password: "" port: 3306 tiup-1.16.3/tests/tiup-dm/script/task/task.yaml000066400000000000000000000016551505422223000214020ustar00rootroot00000000000000--- name: test task-mode: all shard-mode: "pessimistic" target-database: host: "tidb1" port: 4000 user: "root" password: "" mysql-instances: - source-id: "mysql1" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] mydumper-thread: 4 loader-thread: 16 syncer-thread: 16 - source-id: "mysql2" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] mydumper-thread: 4 loader-thread: 16 syncer-thread: 16 block-allow-list: instance: do-dbs: ["~^sharding[\\d]+"] do-tables: - db-name: "~^sharding[\\d]+" tbl-name: "~^t[\\d]+" routes: sharding-route-rules-table: schema-pattern: sharding* table-pattern: t* target-schema: db_target target-table: t_target sharding-route-rules-schema: schema-pattern: sharding* target-schema: db_target tiup-1.16.3/tests/tiup-dm/script/util.sh000077500000000000000000000025211505422223000201170ustar00rootroot00000000000000#!/bin/bash set -eu set -eE -o functrace failure() { local lineno=$2 local fn=$3 local exitstatus=$4 local msg=$5 local lineno_fns=${1% 0} if [[ "$lineno_fns" != "0" ]] ; then lineno="${lineno} ${lineno_fns}" fi echo "${BASH_SOURCE[1]}:${fn}[${lineno}] Failed with status ${exitstatus}: $msg" } trap 'failure "${BASH_LINENO[*]}" "$LINENO" "${FUNCNAME[*]:-script}" "$?" "$BASH_COMMAND"' ERR # instance_num # get the instance number of the dm # filter the output of the go test # PASS # coverage: 12.7% of statements in github.com/pingcap/tiup/components/dm/... function instance_num() { name=$1 count=$(tiup-dm display $name | grep "Total nodes" | awk -F ' ' '{print $3}') echo $count } # wait_instance_num_reach # wait the instance number of dm reach the target_num. 
# timeout 120 second function wait_instance_num_reach() { name=$1 target_num=$2 for ((i=0;i<120;i++)) do tiup-dm prune $name --yes count=$(instance_num $name) if [ "$count" == "$target_num" ]; then echo "instance number reach $target_num" return else sleep 1 fi sleep 1 done echo "fail to wait instance number reach $target_num, count $count, retry num: $i" tiup-dm display $name exit -1 } tiup-1.16.3/tests/tiup-dm/test_cmd.sh000077500000000000000000000131511505422223000174410ustar00rootroot00000000000000#!/bin/bash set -eu name=test_cmd topo=./topo/full_dm.yaml ipprefix=${TIUP_TEST_IP_PREFIX:-"172.19.0"} sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/ # tiup-dm check $topo -i ~/.ssh/id_rsa --enable-mem --enable-cpu --apply # tiup-dm --yes check $topo -i ~/.ssh/id_rsa tiup-dm --yes deploy $name $version $topo -i ~/.ssh/id_rsa # topology doesn't contains the section `monitored` will not deploy node_exporter, blackbox_exporter has_exporter=0 tiup-dm exec $name -N $ipprefix.101 --command "ls /etc/systemd/system/{node,blackbox}_exporter-*.service" || has_exporter=1 if [[ $has_exporter -eq 0 ]]; then echo "monitoring agents should not be deployed for dm cluster if \"monitored\" section is not set." 
exit 1; fi tiup-dm list | grep "$name" # debug https://github.com/pingcap/tiup/issues/666 echo "debug audit:" ls -l ~/.tiup/storage/dm/audit/* head -1 ~/.tiup/storage/dm/audit/* tiup-dm audit echo "end debug audit" tiup-dm audit | grep "deploy $name $version" # Get the audit id can check it just runnable id=`tiup-dm audit | grep "deploy $name $version" | awk '{print $1}'` tiup-dm audit $id # check the local config tiup-dm exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/prometheus-9090/conf/dm_worker.rules.yml" tiup-dm exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/*.json" tiup-dm exec $name -N $ipprefix.101 --command "grep magic-string-for-test /home/tidb/deploy/alertmanager-9093/conf/alertmanager.yml" tiup-dm --yes start $name # check the data dir of dm-master tiup-dm exec $name -N $ipprefix.102 --command "grep /home/tidb/deploy/dm-master-8261/data /home/tidb/deploy/dm-master-8261/scripts/run_dm-master.sh" tiup-dm exec $name -N $ipprefix.103 --command "grep /home/tidb/my_master_data /home/tidb/deploy/dm-master-8261/scripts/run_dm-master.sh" # check the service enabled tiup-dm exec $name -N $ipprefix.102 --command "systemctl status dm-master-8261 | grep 'enabled;'" tiup-dm exec $name -N $ipprefix.102 --command "systemctl status dm-worker-8262 | grep 'enabled;'" # check enable/disable service tiup-dm disable $name -R dm-master tiup-dm exec $name -N $ipprefix.102 --command "systemctl status dm-master-8261 | grep 'disabled;'" tiup-dm exec $name -N $ipprefix.102 --command "systemctl status dm-worker-8262 | grep 'enabled;'" tiup-dm enable $name -R dm-master tiup-dm exec $name -N $ipprefix.102 --command "systemctl status dm-master-8261 | grep 'enabled;'" tiup-dm exec $name -N $ipprefix.102 --command "systemctl status dm-worker-8262 | grep 'enabled;'" tiup-dm --yes stop $name tiup-dm --yes restart $name tiup-dm display $name tiup-dm display $name --uptime total_sub_one=12 echo 
"start scale in dm-master" tiup-dm --yes scale-in $name -N $ipprefix.101:8261 wait_instance_num_reach $name $total_sub_one false # ensure Prometheus's configuration is updated automatically ! tiup-dm exec $name -N $ipprefix.101 --command "grep -q $ipprefix.101:8261 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" echo "start scale out dm-master" topo_master=./topo/full_scale_in_dm-master.yaml sed "s/__IPPREFIX__/$ipprefix/g" $topo_master.tpl > $topo_master tiup-dm --yes scale-out $name $topo_master tiup-dm exec $name -N $ipprefix.101 --command "grep -q $ipprefix.101:8261 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" tiup-dm exec $name -N $ipprefix.101 --command "systemctl status dm-master-8261 | grep 'enabled;'" echo "start scale in dm-worker" yes | tiup-dm scale-in $name -N $ipprefix.102:8262 wait_instance_num_reach $name $total_sub_one # ensure Prometheus's configuration is updated automatically ! tiup-dm exec $name -N $ipprefix.101 --command "grep -q $ipprefix.102:8262 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" echo "start scale out dm-worker" topo_worker=./topo/full_scale_in_dm-worker.yaml sed "s/__IPPREFIX__/$ipprefix/g" $topo_worker.tpl > $topo_worker yes | tiup-dm scale-out $name $topo_worker tiup-dm exec $name -N $ipprefix.101 --command "grep -q $ipprefix.102:8262 /home/tidb/deploy/prometheus-9090/conf/prometheus.yml" tiup-dm exec $name -N $ipprefix.101 --command "systemctl status dm-worker-8262 | grep 'enabled;'" echo "start scale in grafana" yes | tiup-dm scale-in $name -N $ipprefix.101:3000 wait_instance_num_reach $name $total_sub_one echo "start scale out grafana" topo_grafana=./topo/full_scale_in_grafana.yaml sed "s/__IPPREFIX__/$ipprefix/g" $topo_grafana.tpl > $topo_grafana yes | tiup-dm scale-out $name $topo_grafana # test grafana config tiup-dm exec $name -N $ipprefix.101 --command "ls /home/tidb/deploy/grafana-3000/dashboards/*.json && ! 
grep magic-string-for-test /home/tidb/deploy/grafana-3000/dashboards/*.json" # test create a task and can replicate data ./script/task/run.sh # test dm log dir tiup-dm notfound-command 2>&1 | grep $HOME/.tiup/logs/tiup-dm-debug TIUP_LOG_PATH=/tmp/a/b tiup-dm notfound-command 2>&1 | grep /tmp/a/b/tiup-dm-debug cp ~/.tiup/storage/dm/clusters/$name/ssh/id_rsa "/tmp/$name.id_rsa" tiup-dm --yes destroy $name # after destroy the cluster, the public key should be deleted ! ssh -o "StrictHostKeyChecking=no" -o "PasswordAuthentication=no" -i "/tmp/$name.id_rsa" tidb@$ipprefix.102 "ls" unlink "/tmp/$name.id_rsa" topo=./topo/full_dm_monitored.yaml ipprefix=${TIUP_TEST_IP_PREFIX:-"172.19.0"} sed "s/__IPPREFIX__/$ipprefix/g" $topo.tpl > $topo tiup-dm --yes deploy $name $version $topo -i ~/.ssh/id_rsa # topology contains the section `monitored` will deploy node_exporter, blackbox_exporter tiup-dm exec $name -N $ipprefix.101 --command "ls /etc/systemd/system/{node,blackbox}_exporter-*.service" tiup-dm --yes destroy $name tiup-1.16.3/tests/tiup-dm/test_import.sh000077500000000000000000000046171505422223000202170ustar00rootroot00000000000000#!/bin/bash set -eu # ref https://docs.pingcap.com/zh/tidb-data-migration/stable/deploy-a-dm-cluster-using-ansible # script following the docs to deploy dm 1.0.6 using ./ansible_data/inventory.ini function deploy_by_ansible() { # step 1 apt-get -y install git curl sshpass python-pip sudo # step 2 id tidb || useradd -m -d /home/tidb tidb echo "tidb:tidb" | chpasswd sed -i '/tidb/d' /etc/sudoers echo "tidb ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers # use the same key from root instead of create one. 
mkdir -p /home/tidb/.ssh cp ~/.ssh/* /home/tidb/.ssh/ chown -R tidb:tidb /home/tidb/.ssh/ # step 3 su tidb < ansible_data/hosts.ini sed "s/__IPPREFIX__/$ipprefix/g" ansible_data/inventory.ini.tpl > ansible_data/inventory.ini cp ./ansible_data/hosts.ini /home/tidb/dm-ansible/ cp ./ansible_data/inventory.ini /home/tidb/dm-ansible/ cd /home/tidb/dm-ansible # not following the docs, use root and without password to run it sudo ansible-playbook -i hosts.ini create_users.yml -u root #step 6 su tidb < $topo mkdir -p ~/.tiup/bin && cp -f ./root.json ~/.tiup/bin/ yes | tiup-dm deploy $name $old_version $topo -i ~/.ssh/id_rsa yes | tiup-dm start $name # tiup-dm _test $name writable yes | tiup-dm upgrade $name $version # test edit-config & reload # change the config of master and check it after reload # https://stackoverflow.com/questions/5978108/open-vim-from-within-bash-shell-script EDITOR=ex tiup-dm edit-config -y $name < $outfile 2>&1 & # wait $outfile generated sleep 3 trap "kill_all" EXIT # wait start cluster successfully n=0 while [ "$n" -lt 600 ] && ! grep -q "TiDB Playground Cluster is started" $outfile; do n=$(( n + 1 )) sleep 1 done n=0 while [ "$n" -lt 10 ] && ! 
tiup-playground display; do n=$(( n + 1 )) sleep 1 done tiup-playground scale-out --db 2 sleep 5 # ensure prometheus/data dir exists, # fix https://github.com/pingcap/tiup/issues/1039 ls "${TIUP_HOME}/data/test_play/prometheus/data" # 1(init) + 2(scale-out) check_instance_num tidb 3 # get pid of one tidb instance and scale-in pid=`tiup-playground display | grep "tidb" | awk 'NR==1 {print $1}'` tiup-playground scale-in --pid $pid sleep 5 check_instance_num tidb 2 # get pid of one tidb instance and kill it pid=`tiup-playground display | grep "tidb" | awk 'NR==1 {print $1}'` kill -9 $pid sleep 5 echo "*display after kill -9:" tiup-playground display tiup-playground display | grep "signal: killed" | wc -l | grep -q "1" # get pid of one tidb instance and kill it pid=`tiup-playground display | grep "tidb" | grep -v "killed" | awk 'NR==1 {print $1}'` kill $pid sleep 5 echo "*display after kill:" tiup-playground display tiup-playground display | grep -E "terminated|exit" | wc -l | grep -q "1" killall -2 tiup-playground.test || killall -2 tiup-playground sleep 100 # test restart with same data tiup-playground $TIDB_VERSION > $outfile 2>&1 & # wait $outfile generated sleep 3 # wait start cluster successfully timeout 300 grep -q "TiDB Playground Cluster is started" <(tail -f $outfile) cat $outfile | grep ":3930" | grep -q "Done" # start another cluster with tag TAG="test_1" outfile_1=/tmp/tiup-playground-test_1.out # no TiFlash to speed up tiup-playground $TIDB_VERSION --tag $TAG --db 2 --tiflash 0 > $outfile_1 2>&1 & sleep 3 timeout 300 grep -q "TiDB Playground Cluster is started" <(tail -f $outfile_1) tiup-playground --tag $TAG display | grep -qv "exit" # TiDB scale-out to 4 tiup-playground --tag $TAG scale-out --db 2 sleep 5 # TiDB scale-in to 3 pid=`tiup-playground --tag $TAG display | grep "tidb" | awk 'NR==1 {print $1}'` tiup-playground --tag $TAG scale-in --pid $pid sleep 5 # check number of TiDB instances. 
tidb_num=$(tiup-playground --tag $TAG display | grep "tidb" | wc -l | sed 's/ //g') if [ "$tidb_num" != 3 ]; then echo "unexpected tidb instance number: $tidb_num" exit 1 fi killall -2 tiup-playground.test || killall -2 tiup-playground sleep 100 # test for TiKV-CDC echo -e "\033[0;36m<<< Run TiKV-CDC test >>>\033[0m" tiup-playground $TIDB_VERSION --db 1 --pd 1 --kv 1 --tiflash 0 --kvcdc 1 --kvcdc.version v1.0.0 > $outfile 2>&1 & sleep 3 timeout 300 grep -q "TiDB Playground Cluster is started" <(tail -f $outfile) tiup-playground display | grep -qv "exit" # scale out tiup-playground scale-out --kvcdc 2 sleep 5 check_instance_num tikv-cdc 3 # 1(init) + 2(scale-out) # scale in pid=`tiup-playground display | grep "tikv-cdc" | awk 'NR==1 {print $1}'` tiup-playground scale-in --pid $pid sleep 5 check_instance_num tikv-cdc 2 # exit all killall -2 tiup-playground.test || killall -2 tiup-playground sleep 30 # test for TiProxy echo -e "\033[0;36m<<< Run TiProxy test >>>\033[0m" tiup-playground $TIDB_VERSION --db 1 --pd 1 --kv 1 --tiflash 0 --tiproxy 1 --tiproxy.version "nightly" > $outfile 2>&1 & sleep 3 timeout 300 grep -q "TiDB Playground Cluster is started" <(tail -f $outfile) tiup-playground display | grep -qv "exit" # scale out tiup-playground scale-out --tiproxy 1 sleep 5 check_instance_num tiproxy 2 # scale in pid=`tiup-playground display | grep "tiproxy" | awk 'NR==1 {print $1}'` tiup-playground scale-in --pid $pid sleep 5 check_instance_num tiproxy 1 # exit all killall -2 tiup-playground.test || killall -2 tiup-playground sleep 30 echo -e "\033[0;36m<<< Run all test success >>>\033[0m" tiup-1.16.3/tests/tiup/000077500000000000000000000000001505422223000147015ustar00rootroot00000000000000tiup-1.16.3/tests/tiup/.gitignore000066400000000000000000000000131505422223000166630ustar00rootroot00000000000000manifests/ 
tiup-1.16.3/tests/tiup/bin/000077500000000000000000000000001505422223000154515ustar00rootroot00000000000000tiup-1.16.3/tests/tiup/bin/root.json000066400000000000000000000161531505422223000173350ustar00rootroot00000000000000{"signatures":[{"keyid":"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002","sig":"cHrebh9dB5z0ypRdAGKso4xBhCp6mY8pZ3ul6Ggzc5+WGU1rOGHEJPS3SP1F7FaBUu4A4aytvQ1fn0fzL20HyiwtSXu7nFzrO5MiGMNa3afyGYxGjHNWZZmYm/6+eZ9fqt7erTrtJgvuiV4VgmzCbQ3uIFEt89tRlsmkFIQ2bry4Z9ml06b/zT243pO7uInU2On+W3/ggluIoATubggzNxhB0OSqREwNfEUACq4N5UzqRwinCURISk/xUHU4/n/P9VraSocXjcMCryrsvaYHEq0AZMNNUPhSn5ow4kPqeELgb0NZxvG5wo276LngCQUqk7FvcxWSH9gMR3VcBc3OnQ=="},{"keyid":"b128ee6a42e2665bd45aa2fa4a7b6e098cfedb3911154f300634b2c056214b9e","sig":"aFlunIdjZAnm2/tY9QqKGGM2XxVM/cvNpmHrs3Z9BoGzLhOoubhPi70cEfoE2/NQX43SDKr97KT6cPY3XpkNa7mQtVxXEX1/hxc0U6qRFwdAfnV0EzuJeBfn33pfhA/TKVd6FGOEOtrLYrzigLFOuPYrQrvvfWEJGL9merz6kpjXEtTDX+378TQ5wzvphNYDVI3Hp8S+cvatwn+85agzlc4zOrRkp9TMz+vnJZSwQ6eoYqrHFQkuACto+N500lizueeEOSt2Ke17TjxeZIka192XlojHp3iiIm0TsI04DJCAsQ7yliT22BkI/NO+DhWecruJIfb7KfaTmxSG+nddiA=="},{"keyid":"2001b18089c9a865cebee793dbc23f9b1f5ce457f96b8d0b2c0909c26b00c643","sig":"H2qSlXtLWHiECouClS86Enw7pCdbM1duEyG3IcblU8pDmiu72T8SNsz4nq3TZw0hFSqAVw5udVenQ1d3sxQDix1/DCYi6PhkM/ghhyWr6+Ko2mJMT+ejqcCYFvbrb2UoeDEeS85obzi2UPELNC5iBt8iJlRtLzRvpKN+0pTwKhusnLUgan9ZLnRJsZdw2U67W2u/dABxpYsbXDYGa/4M/MkA/NEZhJ53VCvxr8RweBdEeVfKmzF/Ipkxn49aB4hlPgH/2E015U9gu7UQffiWRf+ww3eHgzjp1ItJhclUTbI0Q7BBJ4A5oYWP9DLRab+07y8+iNZmj7wTPvCS+V0+Fw=="},{"keyid":"545c31fd615bbaa4c5424509a9305eb280e019996b043a576dc12b758aa0890a","sig":"PAK7AKXTx61/MKT7ml1QrDaPlbIYZ+UmG+6xgDN/P3ForyqvLgTiqif7ZqZMlUOEXEo/aOZIcJS5yt6xp+XrQT5Dj0OAemmn7kHaYuhl5A3HyMksm2TBGxQXRByyrqsD5OtzWKHXgOIByMBBFnJbPqp3cDU+SoRYizVW114iDo0PXPXQPeX/ffVWkw7kP3l4JBgcUhCrLCND6temGJT7hAMkfPWeDeuuNlyy3ajXH+ENm6DR/RthFi1ys1rAu2j6bDwOpOu3FIzFFdGTs0fJz8tJeLIUejoDJCOwSh7qnpeIBM7cDJlT9sZrVUL5Oluelj4tgoJCO1BN8o7myVCGDw=="},{"keyid":"3f32d76cac38c9d87232ed7570132deb
343a68bce4b12d365b8ab61085e1a633","sig":"2UTi6cgCF/EELQtGWU2ENk458JrFymHn1S8aVv0q3B1YoHR92Mm5Ck0FbFkfg9ySOWiEqaesIvtC8FafrpTnbVUrivYnjKOXGzvVBBeQR/Ri44rql9x+6WzjJ4p6/M+FqZEpHisx0I5AFYf2fQkL1Hztmv6yhQTrW3GOh9SVRYlZwEqjeDqm4wIFGZ6amaMr7xu2zxlTEMtz4Gd9zlZVN4Wr6qeYtkW7YLZcFECTzpwHgHJswzljhG5XTxevI/Tz4eKYOgyQCtPC/JS0HOMiTVc6d2XRs5NNvP2a4/LH1tHZRupMAibklwqiwLKfMwVl+tncb9hedxUSMPmo/CMrxw=="}],"signed":{"_type":"root","expires":"2021-05-26T11:18:30+08:00","roles":{"index":{"keys":{"7fce7ec4f9c36d51dec7ec96065bb64958b743e46ea8141da668cd2ce58a9e61":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn5kVA7MlBfSe7EBaExjl\nKbwoDkn1aYi74s29mFgtRo8nejbrVvZQMCIUhvKc0pFa/l9JD/QY6/nAOCE1lpzi\nwwNkSntfOo3p3HQIR+Ut7hZ4Sxfe/5JagGo3LQ+Hd3EJWUxyEfQ/Bff07F3XAbqM\n5+cKNrdsKWZJcPiJDW621qGwCx52f+gzl9bnFe4/hx34OUgirwqh5DS+LhIO+/yt\nbOiN1AyjQKlnb8lUnblElS4Njd+F4io5VzSrZYi2+4AbTkO6wLwbsWHMzXfv9qwn\nvllufOHpB6EwiQ/xBOMuvJJymHnZvs8AH4SuydQIXLaJuv1ysFaBs0KB/ktbakSK\nLwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/index.json"},"root":{"keys":{"2001b18089c9a865cebee793dbc23f9b1f5ce457f96b8d0b2c0909c26b00c643":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1LB2sQGelCEaKTKPzdim\n5V0RrdDOyZTSsZlWjzqZQn7lIRKG9Yjah//ReBdy3gmTwbZWUYzGFeclS+1+H05f\nvvxJUN1ttNgy6xsKql6s1ZhdwoBLbkjTqHjbRRQ2+fMJQdhusb1TXEP5Vut2jlyo\nSoGSa9mDC0VbGW9Xs/4HqfyH6m4dV6GeFYwDUX0ok6l6DHk28UIFyieKITFNkrKv\n5xoUPS3P49tX7wprXiFBKiP1Tr72O+GSTBFXuUhPASBVCXoxj7g5fB024P464ku/\nTHECfX1F5q7htz2zkgn7V9A9kedASwoqbrC5glHXfrfiQOctHkyKaGLswWe+8OAp\n5wIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"3f32d76cac38c9d87232ed7570132deb343a68bce4b12d365b8ab61085e1a633":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2usmq9K1PdMUEr2AUmC/\n1l8RxbAIhyKzoA0O7zEpIunjU58BP5Ht3APPGAXqg3/lyeTcn8/h+ADEZElrgXCl\nnbsR37R3zmj+/z/M1icp/1O6gaIpHEUz5U2ryPkj9vy5TVE+7uB12x7SBDg3w5mk\nHc0NVPhHBqD9tvXlzzZSHqNjRdM+g5OdCGVzKpt/2LrCIv6MSxoxsqNP3N8I8Tb/\nunwVfhjRjXG23mjLukGPjxJUoXO38WujVWER5ZafhyZDt9VFNdPoGl2kN23aQM4p\n/gyeUgwTfeVqlXYOeErfI9AznnJA12WHTmIMNWpz3NK+c3P4GGTOZcRjnfr0J9wa\ndQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"545c31fd615bbaa4c5424509a9305eb280e019996b043a576dc12b758aa0890a":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxzlxJStPfdjMERTUs2GH\nMKMPAflgMTId3rhKsyLGAoRraAE3+crZEkXz+sEgCSW7590qDofcZYFeS9QOebD2\nI1/PYbDqMOwWkRSta6BRJyhgGKmG8QuxiYQQEQSgBhTQap3jnxiduXiZ+6uTiNkS\n44/Z12GN+vXLDLCVBlxFZx2Am9QFVCyP7f9Dxj0EkaVKRGu6+utjaWGyQLq5splk\nNbFvMLYJLkzrk8dzLwr1E85NRCAVLnRJR4fYllglJmJi6laHdOgXf9GOL1vQ/qUh\nRXqYkGiZ/15vurMMyUaIdzLY95XHw6vsjOwV9kBs8z/cxBVLxpNWUiOBsfDpmBc3\nCwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"9b3cea98f6f23cc11813b12d0526a1b6cfb3761008f0882c9caa8db742d63002":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsOgQkwLOh31QV9OpbO9v\n6o83durJFGPOnVXZiab83pKaSk7HEK9WzXBq0BaPvtFwSfROVdpgtopri5lZi+uH\naMKLUn5F8XRnSMl/7m5vM4XpZZYa4aQId4TWdbFtTu31eHGZ3eEC5nDRJ5NhZOJd\nKLFBu/xmxrh/eNZt4QbdWLZayjHnzyoy5AnfNTR6nJgPAv+rBOqyqT/r14q4Pngh\n3z0I3pNFr5qmxsp013XV+kgOW1F7zT7IMU8xRIgo85UWUNhax0/bjY/2NI1Z+WjR\nyhZmUBMVYWvfw97xDUrvBvrJxZPgg0lGvxJC6LF2dM7wgLaNx9khT6HMBVxjxLMs\nDQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"},"b128ee6a42e2665bd45aa2fa4a7b6e098cfedb3911154f300634b2c056214b9e":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0kgU3v3SxYzm5J6P+GPP\nmy6toBnKYtTViAVmpUJiIEjdZ9NLpoJU0na9q0CD8sIgo2Js/W/owJUvSj6rm8us\nsu/Ve5KsoJN6zca2am1uZ5IKnc48i0mCv76WXawCxM+NFGqSCMJcltlhj3fC/GDS\ngu+BiIbrgR1PgJf6Jk6l7uMJdN3TL6JJQcEC4lz+2hj5zoVNYkq06ZC79j2tPDCI\nkTAYGF/TAAVLH08/kGH5ZeRPlVKJ7cwW3OniLM5NeFnS8+shRNb6AYr7xju3Ikbw\nDo14ipIghBI0iAxn6Lvr/iilc7TM7RWJ4OiTrmK3SQSJ+U6H2N2/I5OGEHBEKzbA\nOQIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":3,"url":"/root.json"},"snapshot":{"keys":{"8660a9f40687fb33e6f8ad563f21ee81b9ce7b91c90827cc7ae2416c5e0e94e9":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqTZx29eJR5EumjqM4YTb\nFlKbim1GNYmtbCLH51BbU2lt46ddmfGvtGsxTD3mIZ/GEHVFv6Aei3xx5nIfhGP0\nrG78JRz394uU8Pd62DiIFWYizr5o+ZBZu29D2YK5ZtxoLFpgt0ibnINK2NcesDC8\nSqfIUbMiQFT6yB/MYD275SjfRGHOeYTPmKdjMJrhLL2cfIPYnQ0QFYIyMvXBG1Fj\nU0rc9UclYQHh9YheIDVYI9YCo/DWP3KFfRJpoTjQRGoPSK9TXcpCAEzQpEG3jOek\n9PdV9Ol6/O8JbrFwXWF3LhkUThg+zCjV4qHtP4oqp5QCqzTQTXGQ9qxWUSlHi4Eu\nIwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/snapshot.json"},"timestamp":{"keys":{"66d4ea1da00076c822a6e1b4df5eb1e529eb38f6edcedff323e62f2bfe3eaddd":{"keytype":"rsa","keyval":{"public":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzTgV5iKhMnunUDxt4PB\npYqTMPaJN/ZdOOsP6cS3DeCE/EcYGfgCjvP7KD3gjG98VDBTVcuwZClSy+/zvHhV\nIq7VWu+yxQL5c6oa1xpCyHoA96JiLIDPhmqEdscdRybcRQ2CYywzKA8jSwEQCnEK\nc8a74ceY352l/MEcOem0+AtKrOjqcjbXCayDwC9yTg/c78bkp+4T8AhSWgt6Tlrt\nY8jLE7zwojFtIYtMwobWRIW2O3nJDXiSBbTPG3M9kF1G43INshSdBcuq5Tmy8lpE\n/XiG/E7+hP63Hm+KAcdvl553Zs7pLhAZxV0kqlApqRRwhscw+JQci8sVONun5t9t\nNwIDAQAB\n-----END PUBLIC KEY-----\n"},"scheme":"rsassa-pss-sha256"}},"threshold":1,"url":"/timestamp.json"}},"spec_version":"0.1.0","version":1}}tiup-1.16.3/tests/tiup/test_tiup.sh000077500000000000000000000033431505422223000172630ustar00rootroot00000000000000#!/usr/bin/env bash set 
-eux TEST_DIR=$(cd "$(dirname "$0")"; pwd) TMP_DIR=`mktemp -d` mkdir -p $TEST_DIR/cover function tiup() { # echo "in function" if [ -f "$TEST_DIR/bin/tiup.test" ]; then $TEST_DIR/bin/tiup.test -test.coverprofile=$TEST_DIR/cover/cov.itest-$(date +'%s')-$RANDOM.out __DEVEL--i-heard-you-like-tests "$@" else $TEST_DIR/../../bin/tiup "$@" fi } rm -rf $TMP_DIR/data # Profile home directory mkdir -p $TMP_DIR/home/bin/ export TIUP_HOME=$TMP_DIR/home tiup mirror set --reset tiup list tiup tiup help tiup install tidb:v5.2.2 tiup install tidb:v3.0.13 tiup update tidb tiup update tidb --nightly tiup --binary tidb:nightly tiup status tiup clean --all tiup help tidb tiup env TIUP_SSHPASS_PROMPT="password" tiup env TIUP_SSHPASS_PROMPT | grep password # test mirror CMP_TMP_DIR=`mktemp -d` cat > $CMP_TMP_DIR/hello.sh << EOF #! /bin/sh echo "hello, TiDB" EOF chmod 755 $CMP_TMP_DIR/hello.sh tar -C $CMP_TMP_DIR -czf $CMP_TMP_DIR/hello.tar.gz hello.sh tiup mirror genkey TEST_MIRROR_A=`mktemp -d` tiup mirror init $TEST_MIRROR_A tiup mirror set $TEST_MIRROR_A tiup mirror grant pingcap echo "should fail" ! 
tiup mirror grant pingcap # this should failed tiup mirror publish hello v0.0.1 $CMP_TMP_DIR/hello.tar.gz hello.sh tiup hello:v0.0.1 | grep TiDB TEST_MIRROR_B=`mktemp -d` tiup mirror init $TEST_MIRROR_B tiup mirror set $TEST_MIRROR_B tiup mirror grant pingcap tiup mirror publish hello v0.0.2 $CMP_TMP_DIR/hello.tar.gz hello.sh tiup mirror set $TEST_MIRROR_A tiup mirror merge $TEST_MIRROR_B tiup hello:v0.0.2 | grep TiDB tiup uninstall tiup uninstall tidb:v3.0.13 tiup uninstall tidb --all tiup uninstall --all tiup uninstall --self rm -rf $TMP_DIR $CMP_TMP_DIR $TEST_MIRROR_A $TEST_MIRROR_B tiup-1.16.3/tests/tiup/tiup.toml000066400000000000000000000000541505422223000165560ustar00rootroot00000000000000mirror = "https://tiup-mirrors.pingcap.com" tiup-1.16.3/tools/000077500000000000000000000000001505422223000137165ustar00rootroot00000000000000tiup-1.16.3/tools/check/000077500000000000000000000000001505422223000147735ustar00rootroot00000000000000tiup-1.16.3/tools/check/check-tidy.sh000077500000000000000000000020001505422223000173460ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright 2019 PingCAP, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # See the License for the specific language governing permissions and # limitations under the License. # # set is used to set the environment variables. # -e: exit immediately when a command returning a non-zero exit code. # -u: treat unset variables as an error. # -o pipefail: sets the exit code of a pipeline to that of the rightmost command to exit with a non-zero status, # or to zero if all commands of the pipeline exit successfully. set -euo pipefail # go mod tidy do not support symlink cd -P . 
cp go.sum /tmp/go.sum.before GO111MODULE=on go mod tidy diff -q go.sum /tmp/go.sum.before tiup-1.16.3/tools/check/errcheck_excludes.txt000066400000000000000000000000421505422223000212120ustar00rootroot00000000000000fmt.Fprintf fmt.Fprint fmt.Sscanf tiup-1.16.3/tools/check/golangci.yaml000066400000000000000000000007741505422223000174520ustar00rootroot00000000000000linters-settings: govet: enable: - nilness errcheck: exclude-functions: - (*mime/multipart.Form).RemoveAll gocritic: disabled-checks: - ifElseChain linters: disable-all: true enable: - bodyclose - errcheck - gofmt - goimports - gosimple - govet - ineffassign # - interfacer - misspell - rowserrcheck - staticcheck - typecheck - unconvert - unused - whitespace - gocritic # - goconst # - gocyclo tiup-1.16.3/tools/check/revive.toml000066400000000000000000000024041505422223000171700ustar00rootroot00000000000000ignoreGeneratedHeader = false severity = "error" confidence = 0.8 errorCode = -1 warningCode = 0 [rule.atomic] [rule.blank-imports] [rule.context-as-argument] [rule.duplicated-imports] [rule.error-return] [rule.error-strings] [rule.error-naming] [rule.confusing-naming] [rule.unexported-naming] [rule.exported] [rule.if-return] [rule.var-declaration] [rule.var-naming] [rule.range] [rule.range-val-in-closure] [rule.range-val-address] [rule.receiver-naming] [rule.indent-error-flow] [rule.superfluous-else] [rule.modifies-parameter] [rule.modifies-value-receiver] [rule.time-equal] [rule.time-naming] [rule.empty-block] [rule.empty-lines] [rule.get-return] [rule.unnecessary-stmt] [rule.struct-tag] [rule.string-of-int] [rule.string-format] [rule.constant-logical-expr] [rule.bool-literal-in-expr] [rule.optimize-operands-order] [rule.redefines-builtin-id] [rule.waitgroup-by-value] [rule.unconditional-recursion] [rule.use-any] [rule.cognitive-complexity] severity = "error" arguments =[100] #[rule.cyclomatic] # severity = "warning" # arguments = [48] [imports-blacklist] arguments =["crypto/md5", "crypto/sha1", 
"io/ioutil"] [rule.defer] arguments=[["call-chain", "method-call", "return"]] # This can be checked by other tools like megacheck [rule.unreachable-code]