pax_global_header00006660000000000000000000000064147647653200014530gustar00rootroot0000000000000052 comment=8b490c0159f2d3b80d1721c1e632a021f496f375 systemd_exporter-0.7.0/000077500000000000000000000000001476476532000151545ustar00rootroot00000000000000systemd_exporter-0.7.0/.circleci/000077500000000000000000000000001476476532000170075ustar00rootroot00000000000000systemd_exporter-0.7.0/.circleci/config.yml000066400000000000000000000023631476476532000210030ustar00rootroot00000000000000--- version: 2.1 orbs: prometheus: prometheus/prometheus@0.17.1 executors: # Whenever the Go version is updated here, .promu.yml should # also be updated. golang: docker: - image: cimg/go:1.24 jobs: test: executor: golang steps: - prometheus/setup_environment - run: make - prometheus/store_artifact: file: systemd_exporter workflows: version: 2 systemd_exporter: jobs: - test: filters: tags: only: /.*/ - prometheus/build: name: build filters: tags: only: /.*/ - prometheus/publish_main: context: org-context docker_hub_organization: prometheuscommunity quay_io_organization: prometheuscommunity requires: - test - build filters: branches: only: main - prometheus/publish_release: context: org-context docker_hub_organization: prometheuscommunity quay_io_organization: prometheuscommunity requires: - test - build filters: tags: only: /^v.*/ branches: ignore: /.*/ systemd_exporter-0.7.0/.github/000077500000000000000000000000001476476532000165145ustar00rootroot00000000000000systemd_exporter-0.7.0/.github/dependabot.yml000066400000000000000000000001561476476532000213460ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "gomod" directory: "/" schedule: interval: "monthly" systemd_exporter-0.7.0/.github/workflows/000077500000000000000000000000001476476532000205515ustar00rootroot00000000000000systemd_exporter-0.7.0/.github/workflows/container_description.yml000066400000000000000000000044671476476532000256740ustar00rootroot00000000000000--- name: Push README to Docker Hub on: push: 
paths: - "README.md" - "README-containers.md" - ".github/workflows/container_description.yml" branches: [ main, master ] permissions: contents: read jobs: PushDockerHubReadme: runs-on: ubuntu-latest name: Push README to Docker Hub if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set docker hub repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to Dockerhub uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 env: DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }} DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }} with: destination_container_repo: ${{ env.DOCKER_REPO_NAME }} provider: dockerhub short_description: ${{ env.DOCKER_REPO_NAME }} # Empty string results in README-containers.md being pushed if it # exists. Otherwise, README.md is pushed. readme_file: '' PushQuayIoReadme: runs-on: ubuntu-latest name: Push README to quay.io if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. steps: - name: git checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set quay.io org name run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV - name: Set quay.io repo name run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV - name: Push README to quay.io uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1 env: DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }} with: destination_container_repo: ${{ env.DOCKER_REPO_NAME }} provider: quay # Empty string results in README-containers.md being pushed if it # exists. Otherwise, README.md is pushed. 
readme_file: '' systemd_exporter-0.7.0/.github/workflows/golangci-lint.yml000066400000000000000000000023451476476532000240270ustar00rootroot00000000000000--- # This action is synced from https://github.com/prometheus/prometheus name: golangci-lint on: push: paths: - "go.sum" - "go.mod" - "**.go" - "scripts/errcheck_excludes.txt" - ".github/workflows/golangci-lint.yml" - ".golangci.yml" pull_request: permissions: # added using https://github.com/step-security/secure-repo contents: read jobs: golangci: permissions: contents: read # for actions/checkout to fetch code pull-requests: read # for golangci/golangci-lint-action to fetch pull requests name: lint runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Install Go uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: 1.23.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: args: --verbose version: v1.62.0 systemd_exporter-0.7.0/.gitignore000066400000000000000000000000451476476532000171430ustar00rootroot00000000000000/systemd_exporter /bin/golangci-lint systemd_exporter-0.7.0/.golangci.yml000066400000000000000000000004731476476532000175440ustar00rootroot00000000000000--- linters: enable: - misspell - revive - sloglint issues: exclude-rules: - path: _test.go linters: - errcheck linters-settings: errcheck: exclude-functions: # Used in HTTP handlers, any error is handled by the server itself. - (net/http.ResponseWriter).Write systemd_exporter-0.7.0/.promu.yml000066400000000000000000000013461476476532000171230ustar00rootroot00000000000000go: # Whenever the Go version is updated here, .travis.yml and # .circle/config.yml should also be updated. 
version: 1.24 repository: path: github.com/prometheus-community/systemd_exporter build: binaries: - name: systemd_exporter ldflags: | -X github.com/prometheus/common/version.Version={{.Version}} -X github.com/prometheus/common/version.Revision={{.Revision}} -X github.com/prometheus/common/version.Branch={{.Branch}} -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} tarball: files: - LICENSE - NOTICE crossbuild: platforms: - linux systemd_exporter-0.7.0/.travis.yml000066400000000000000000000000341476476532000172620ustar00rootroot00000000000000language: go go: - "1.x" systemd_exporter-0.7.0/.yamllint000066400000000000000000000007171476476532000170130ustar00rootroot00000000000000--- extends: default ignore: | **/node_modules rules: braces: max-spaces-inside: 1 level: error brackets: max-spaces-inside: 1 level: error commas: disable comments: disable comments-indentation: disable document-start: disable indentation: spaces: consistent indent-sequences: consistent key-duplicates: ignore: | config/testdata/section_key_dup.bad.yml line-length: disable truthy: check-keys: false systemd_exporter-0.7.0/CHANGELOG.md000066400000000000000000000046621476476532000167750ustar00rootroot00000000000000## main / unreleased ## 0.7.0 / 2025-03-14 * [CHANGE] Used TrimLeft instead of regex for boot times metrics #124 * [CHANGE] Switch logging to slog #142 * [FEATURE] Add boot stage timestamp collector #110 * [FEATURE] Add watchdog metrics #111 * [FEATURE] Add systemd-resolved metrics collector #119 ## 0.6.0 / 2023-10-30 * [CHANGE] Remove broken metrics collection #68 * [CHANGE] Remove use of cgroups #105 * [FEATURE] Add unit timestamp metrics #58 ## 0.5.0 / 2022-07-20 Now released under the Prometheus Community * [CHANGE] systemd_service_restart_total changed label name from `type` to `name` #32 * [CHANGE] Remove prommod metrics #36 * [CHANGE] Rename collector flags #53 * [FEATURE] Add TLS and 
Basic Auth #54 ## 0.4.0 / 2020-04-23 * New option `--collector.user`, which allows users to connect to the user systemd instance. Thanks @pelov :) * New feature `--collector.enable-ip-accounting`, enables service ip accounting metrics. This feature only works with systemd 235 and above. thanks @jwhited :) ## 0.3.0 / 2020-03-28 ### **Breaking changes** * `systemd_unit_state` label `type` has new meaning Now shows Unit type (`service`, `scope`, etc), not Service Unit types (`simple`, `forking`, etc) or mount unit types(`aufs`,`ext3`, etc). Service and mount types have been moved to `systemd_unit_info` ### Changes - [CHANGE] Important! Docker images are now published to `povilasv/systemd-exporter` and `quay.io/povilasv/systemd-exporter` registry. - [FEATURE] Add support for amd64, 386, arm, arm64, mips, mipsle, mips64, mips64le, ppc64, ppc64le, s390x docker images. - [FEATURE] Read unit CPU usage from cgroup. Added `systemd_unit_cpu_seconds_total` metric. **Note** - Untested on unified hierarchy - [FEATURE] Add `systemd_unit_info` with metainformation about units incl. subtype specific info - [ENHANCEMENT] Added `type` label to all metrics named `systemd_unit-*` to support PromQL grouping * [ENHANCEMENT] `systemd_unit_state` works for all unit types, not just service and mount units * [ENHANCEMENT] Scrapes are approx 80% faster. If needed, set GOMAXPROCS to limit max concurrency * [CHANGE] Start tracking metric cardinality in readme * [CHANGE] Expanded default set of unit types monitored. Only device unit types are not enabled by default * [BUGFIX] `timer_last_trigger_seconds` metric is now exported as expected for all timers ## 0.2.0 / 2019-03-20 * [CLEANUP] Introduced changelog. From now on, changes will be reported in this file. 
systemd_exporter-0.7.0/CODE_OF_CONDUCT.md000066400000000000000000000002301476476532000177460ustar00rootroot00000000000000# Prometheus Community Code of Conduct Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). systemd_exporter-0.7.0/Dockerfile000066400000000000000000000005251476476532000171500ustar00rootroot00000000000000ARG ARCH="amd64" ARG OS="linux" FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest LABEL maintainer="The Prometheus Authors " ARG ARCH="amd64" ARG OS="linux" COPY .build/${OS}-${ARCH}/systemd_exporter /bin/systemd_exporter EXPOSE 9558 USER nobody ENTRYPOINT ["/bin/systemd_exporter"] systemd_exporter-0.7.0/LICENSE000066400000000000000000000261351476476532000161700ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. systemd_exporter-0.7.0/Makefile000066400000000000000000000015051476476532000166150ustar00rootroot00000000000000# Copyright 2022 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Needs to be defined before including Makefile.common to auto-generate targets DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x DOCKER_REPO ?= prometheuscommunity include Makefile.common STATICCHECK_IGNORE = DOCKER_IMAGE_NAME ?= systemd-exporter systemd_exporter-0.7.0/Makefile.common000066400000000000000000000221631476476532000201070ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A common Makefile that includes rules to be reused in different prometheus projects. # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! # Example usage : # Create the main Makefile in the root project directory. # include Makefile.common # customTarget: # @echo ">> Running customTarget" # # Ensure GOBIN is not set during build so that promu is installed to the correct path unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... ifeq (arm, $(GOHOSTARCH)) GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) ifneq ($(shell command -v gotestsum 2> /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_VERSION ?= v1.62.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. 
# windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) # If we're in CI and there is an Actions file, that means the linter # is being run in Actions, so we don't need to run it here. ifneq (,$(SKIP_GOLANGCI_LINT)) GOLANGCI_LINT := else ifeq (,$(CIRCLE_JOB)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint endif endif endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKERFILE_PATH ?= ./Dockerfile DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 test-flags := -race endif endif # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; .PHONY: common-all common-all: precheck style check_license lint yamllint unused build test .PHONY: common-style common-style: @echo ">> checking code style" @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ if [ -n "$${fmtRes}" ]; then \ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ echo "Please ensure you are using $$($(GO) version) for formatting code."; \ exit 1; \ fi .PHONY: common-check_license common-check_license: @echo ">> checking license header" @licRes=$$(for file in $$(find . 
-type f -iname '*.go' ! -path './vendor/*') ; do \ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi .PHONY: common-deps common-deps: @echo ">> getting dependencies" $(GO) mod download .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ $(GO) get -d $$m; \ done $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format common-format: @echo ">> formatting code" $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-lint-fix common-lint-fix: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint fix" $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" ifeq (, $(shell command -v yamllint 2> /dev/null)) @echo "yamllint not installed so skipping" else yamllint . endif # For backward-compatibility. 
.PHONY: common-staticcheck common-staticcheck: lint .PHONY: common-unused common-unused: @echo ">> running check for unused/missing packages in go.mod" $(GO) mod tidy @git diff --exit-code -- go.sum go.mod .PHONY: common-build common-build: promu @echo ">> building binaries" $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) .PHONY: common-docker-repo-name common-docker-repo-name: @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ $(DOCKERBUILD_CONTEXT) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) $(PROMU): $(eval PROMU_TMP := $(shell mktemp -d)) curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) mkdir -p $(FIRST_GOPATH)/bin cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh ifdef GOLANGCI_LINT $(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ | sed -e '/install -d/d' \ | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif .PHONY: precheck precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ exit 1; \ fi endef govulncheck: install-govulncheck govulncheck ./... 
install-govulncheck: command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest systemd_exporter-0.7.0/README.md000066400000000000000000000231661476476532000164430ustar00rootroot00000000000000# Systemd exporter [![CircleCI](https://circleci.com/gh/prometheus-community/systemd_exporter/tree/master.svg?style=shield)](circleci) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus-community/systemd_exporter)](https://goreportcard.com/report/github.com/prometheus-community/systemd_exporter) [![Docker Pulls](https://img.shields.io/docker/pulls/prometheuscommunity/systemd-exporter.svg?maxAge=604800)](https://hub.docker.com/r/prometheuscommunity/systemd-exporter) [![Docker Repository on Quay](https://quay.io/repository/prometheuscommunity/systemd-exporter/status)](https://quay.io/repository/prometheuscommunity/systemd-exporter) Prometheus exporter for systemd units, written in Go. ## Relation to Node and Process Exporter The focus on each exporter is different. For example, the [node_exporter](https://github.com/prometheus/node_exporter/) has a broad node level focus, and the [process-exporter](https://github.com/ncabatoff/process-exporter) is focused on deep analysis of specific processes. systemd-exporter aims to fit in the middle, taking advantage of systemd's builtin process and thread grouping to provide application-level metrics. | Exporter | Metric Goals | Example | | ---------------- | ------------------------------------------ | -------------------------------------------------- | | node-exporter | Machine-wide monitoring and usage summary | CPU usage of entire node (e.g. `127.0.0.1`) | | systemd-exporter | Systemd unit monitoring and resource usage | The state of each service (e.g. `mongodb.service`) | | process-exporter | Focus is on individual processes | CPU of specific process ID (e.g. `1127`) | | cAdvisor | Metrics per cgroup (systemd uses cgroups) | CPU usage of each cgroup (e.g. 
`system.slice/mongodb.service` Systemd groups processes, threads, and other resources (PIDs, memory, etc) into logical containers called units. Systemd-exporter will read the 11 different types of systemd units (e.g. service, slice, etc) and give you metrics about the health and resource consumption of each unit. This allows an application specific view of your system, allowing you to determine resource usage of an application such as `mysql.service` independently from the resources used by other processes on your system. This clearly allows more granular monitoring than machine-level metrics. However, we do not export metrics for specific processes or threads. Said another way, the granularity of systemd-exporter is limited by the size of the systemd unit. If you've chosen to pack 400 threads and 20 processes inside the `mysql.service`, we will only export systemd provided metrics on the service unit, not on the individual tasks. For that level of detail (and numerous other "very fine grained") metrics, you should look into process-exporter. There is overlap between these three exporters, so make sure to read the documents if you use multiple. For example, if you are using systemd-exporter, then you should *not* enable these flags in node-exporter as we already expose identical metrics by default: `--systemd.collector.enable-task-metrics --systemd.collector.enable-restarts-metrics --systemd.collector.enable-start-time-metrics`. process-exporter has a concept of logically grouping processes according to the process names. This is a bottom-up variant of logical process grouping, while systemd's approach is top-down (e.g. groups are named and then processes are launched in them). The systemd approach provides much stronger guarantees that no processes/threads are "missing" from your group, but it does also require that you are using systemd as your init system whereas the bottom-up approach works on all systems. 
# Systemd versions There is varying support for different metrics based on systemd version. Flags that come from newer systemd versions are disabled by default to avoid breaking things for users using older systemd versions. Try enabling different flags, to see what works on your system. Optional Flags: Name | Description | ---------|-------------| --systemd.collector.enable-restart-count | Enables service restart count metrics. This feature only works with systemd 235 and above. --systemd.collector.enable-file-descriptor-size | Enables file descriptor size metrics. Systemd Exporter needs access to /proc/X/fd files. --systemd.collector.enable-ip-accounting | Enables service ip accounting metrics. This feature only works with systemd 235 and above. Of note, there is no customized support for `.snapshot` (removed in systemd v228), `.busname` (only present on systems using kdbus), `generated` (created via generators), `transient` (created during systemd-run) have no special support. # Deployment To install this exporter, either: * Clone this Git repository and run `go build`. * Use one of the prebuilt binaries from the [releases page](https://github.com/prometheus-community/systemd_exporter/releases/). * Use the [Ansible systemd_exporter role](https://prometheus-community.github.io/ansible/branch/main/systemd_exporter_role.html). * On NixOS, use the [services.prometheus.exporters.systemd module](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/monitoring/prometheus/exporters.nix). * Take a look at `examples` for daemonset manifests for Kubernetes. # User privilleges User needs to access systemd dbus, typically exporter needs to see node's `/proc` to work. # Metrics All metrics have `name` label, which contains systemd unit name. For example `name="bluetooth.service"` or `name="systemd-coredump.socket"`. Metrics that are present for all units (e.g. those named `unit_*`) additionally have a label type e.g. 
(`type="socket"` or `type="service"`) to allow usage in PromQL grouping queries (e.g. `count(systemd_unit_state) by (type)`) Note that a number of unit types are filtered by default | Metric name | Metric type | Status | Cardinality | | -------------------------------------------- | ----------- | -------- | ------------------------------------------------------------------ | | systemd_exporter_build_info | Gauge | UNSTABLE | 1 per systemd-exporter | | systemd_unit_info | Gauge | UNSTABLE | 1 per service + 1 per mount | | systemd_unit_state | Gauge | UNSTABLE | 5 per unit {state="activating/active/deactivating/failed/inactive} | | systemd_unit_tasks_current | Gauge | UNSTABLE | 1 per service | | systemd_unit_tasks_max | Gauge | UNSTABLE | 1 per service | | systemd_unit_start_time_seconds | Gauge | UNSTABLE | 1 per service | | systemd_service_restart_total | Gauge | UNSTABLE | 1 per service | | systemd_service_ip_ingress_bytes | Counter | UNSTABLE | 1 per service | | systemd_service_ip_egress_bytes | Counter | UNSTABLE | 1 per service | | systemd_service_ip_ingress_packets_total | Counter | UNSTABLE | 1 per service | | systemd_service_ip_egress_packets_total | Counter | UNSTABLE | 1 per service | | systemd_socket_accepted_connections_total | Counter | UNSTABLE | 1 per socket | | systemd_socket_current_connections | Gauge | UNSTABLE | 1 per socket | | systemd_socket_refused_connections_total | Gauge | UNSTABLE | 1 per socket | | systemd_timer_last_trigger_seconds | Gauge | UNSTABLE | 1 per timer | | systemd_watchdog_enabled | Gauge | UNSTABLE | 1 (only 1 watchdog configurable) | | systemd_watchdog_last_ping_monotonic_seconds | Gauge | UNSTABLE | 1 | | systemd_watchdog_last_ping_time_seconds | Gauge | UNSTABLE | 1 | | systemd_watchdog_runtime_seconds | Gauge | UNSTABLE | 1 | ## Configuration systemd_exporter allows you to include/exclude some systemd units. You can use `--systemd.collector.unit-include` and `--systemd.collector.unit-exclude` to select wanted units. 
Both of these options are in [RE2](https://github.com/google/re2/wiki/Syntax) syntax. For example: ``` args: - --systemd.collector.unit-include=.*ceph.*\.service|ceph.*\.timer|kubelet.service|docker.service - --systemd.collector.unit-exclude=ceph-volume.*\.service ``` ## TLS and basic authentication The systemd Exporter supports TLS and basic authentication. To use TLS and/or basic authentication, you need to pass a configuration file using the `--web.config.file` parameter. The format of the file is described [in the exporter-toolkit repository](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md). systemd_exporter-0.7.0/SECURITY.md000066400000000000000000000002541476476532000167460ustar00rootroot00000000000000# Reporting a security issue The Prometheus security policy, including how to report vulnerabilities, can be found here: systemd_exporter-0.7.0/VERSION000066400000000000000000000000061476476532000162200ustar00rootroot000000000000000.7.0 systemd_exporter-0.7.0/examples/000077500000000000000000000000001476476532000167725ustar00rootroot00000000000000systemd_exporter-0.7.0/examples/kubernetes/000077500000000000000000000000001476476532000211415ustar00rootroot00000000000000systemd_exporter-0.7.0/examples/kubernetes/daemonset.yaml000066400000000000000000000025241476476532000240070ustar00rootroot00000000000000apiVersion: apps/v1 kind: DaemonSet metadata: name: systemd-exporter labels: name: systemd-exporter spec: selector: matchLabels: k8s-app: systemd-exporter updateStrategy: rollingUpdate: maxUnavailable: 100% type: RollingUpdate template: metadata: labels: k8s-app: systemd-exporter annotations: prometheus.io/scrape: "true" prometheus.io/path: "/metrics" prometheus.io/port: "9558" spec: securityContext: runAsUser: 0 hostPID: true containers: - name: systemd-exporter image: quay.io/prometheuscommunity/systemd-exporter:main securityContext: privileged: true args: - --log.level=info - 
--collector.unit-include=kubelet.service|docker.service ports: - name: metrics containerPort: 9558 hostPort: 9558 volumeMounts: - name: proc mountPath: /host/proc readOnly: true - name: systemd mountPath: /run/systemd readOnly: true resources: limits: memory: 100Mi requests: cpu: 10m memory: 100Mi volumes: - name: proc hostPath: path: /proc - name: systemd hostPath: path: /run/systemd systemd_exporter-0.7.0/go.mod000066400000000000000000000024751476476532000162720ustar00rootroot00000000000000module github.com/prometheus-community/systemd_exporter go 1.23.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 github.com/coreos/go-systemd/v22 v22.5.0 github.com/godbus/dbus/v5 v5.1.0 github.com/prometheus/client_golang v1.21.1 github.com/prometheus/common v0.62.0 github.com/prometheus/exporter-toolkit v0.13.2 ) require ( github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/klauspost/compress v1.17.11 // indirect github.com/mdlayher/socket v0.4.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/net v0.33.0 // indirect golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect google.golang.org/protobuf v1.36.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) systemd_exporter-0.7.0/go.sum000066400000000000000000000153731476476532000163200ustar00rootroot00000000000000github.com/alecthomas/kingpin/v2 v2.4.0 
h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= github.com/prometheus/common v0.62.0/go.mod 
h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= 
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= systemd_exporter-0.7.0/main.go000066400000000000000000000077501476476532000164400ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main import ( "net/http" _ "net/http/pprof" "os" "github.com/alecthomas/kingpin/v2" "github.com/prometheus-community/systemd_exporter/systemd" "github.com/prometheus-community/systemd_exporter/systemd/resolved" "github.com/prometheus/client_golang/prometheus" versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/promslog" "github.com/prometheus/common/promslog/flag" "github.com/prometheus/common/version" "github.com/prometheus/exporter-toolkit/web" webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag" ) func main() { var ( metricsPath = kingpin.Flag( "web.telemetry-path", "Path under which to expose metrics.", ).Default("/metrics").String() disableExporterMetrics = kingpin.Flag( "web.disable-exporter-metrics", "Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).", ).Bool() maxRequests = kingpin.Flag( "web.max-requests", "Maximum number of parallel scrape requests. 
Use 0 to disable.", ).Default("40").Int() enableResolvedgMetrics = kingpin.Flag("systemd.collector.enable-resolved", "Enable systemd-resolved statistics").Bool() toolkitFlags = webflag.AddFlags(kingpin.CommandLine, ":9558") ) promslogConfig := &promslog.Config{} flag.AddFlags(kingpin.CommandLine, promslogConfig) kingpin.Version(version.Print("systemd_exporter")) kingpin.HelpFlag.Short('h') kingpin.Parse() logger := promslog.New(promslogConfig) logger.Info("Starting systemd_exporter", "version", version.Info()) logger.Info("Build context", "build_context", version.BuildContext()) exporterMetricsRegistry := prometheus.NewRegistry() r := prometheus.NewRegistry() r.MustRegister(versioncollector.NewCollector("systemd_exporter")) collector, err := systemd.NewCollector(logger) if err != nil { logger.Error("Couldn't create collector", "err", err) os.Exit(1) } if err := r.Register(collector); err != nil { logger.Error("Couldn't register systemd collector", "err", err) os.Exit(1) } handler := promhttp.HandlerFor( prometheus.Gatherers{exporterMetricsRegistry, r}, promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError, MaxRequestsInFlight: *maxRequests, }, ) if !*disableExporterMetrics { handler = promhttp.InstrumentMetricHandler( exporterMetricsRegistry, handler, ) } if *enableResolvedgMetrics { resolvedCollector, err := resolved.NewCollector(logger) if err != nil { logger.Error("Couldn't create resolved collector", "err", err) os.Exit(1) } if err := r.Register(resolvedCollector); err != nil { logger.Error("Couldn't register resolved collector", "err", err) os.Exit(1) } } http.Handle(*metricsPath, handler) if *metricsPath != "/" && *metricsPath != "" { landingConfig := web.LandingConfig{ Name: "systemd Exporter", Description: "Prometheus Exporter for systemd", Version: version.Info(), Links: []web.LandingLinks{ { Address: *metricsPath, Text: "Metrics", }, }, } landingPage, err := web.NewLandingPage(landingConfig) if err != nil { logger.Error("Couldn't create landing 
page", "err", err.Error()) os.Exit(1) } http.Handle("/", landingPage) } srv := &http.Server{} if err := web.ListenAndServe(srv, toolkitFlags, logger); err != nil { logger.Error("Error starting server", "err", err) os.Exit(1) } } systemd_exporter-0.7.0/systemd/000077500000000000000000000000001476476532000166445ustar00rootroot00000000000000systemd_exporter-0.7.0/systemd/resolved/000077500000000000000000000000001476476532000204675ustar00rootroot00000000000000systemd_exporter-0.7.0/systemd/resolved/resolved.go000066400000000000000000000122031476476532000226370ustar00rootroot00000000000000// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package resolved import ( "context" "log/slog" "github.com/godbus/dbus/v5" "github.com/prometheus/client_golang/prometheus" ) const ( namespace = "systemd" subsystem = "resolved" ) type Collector struct { ctx context.Context logger *slog.Logger } var ( resolvedCurrentTransactions = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "current_transactions"), "Resolved Current Transactions", nil, nil, ) resolvedTotalTransactions = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "transactions_total"), "Resolved Total Transactions", nil, nil, ) resolvedCurrentCacheSize = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "current_cache_size"), "Resolved Current Cache Size", nil, nil, ) resolvedTotalCacheHits = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "cache_hits_total"), "Resolved Total Cache Hits", nil, nil, ) resolvedTotalCacheMisses = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "cache_misses_total"), "Resolved Total Cache Misses", nil, nil, ) resolvedTotalSecure = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "dnssec_secure_total"), "Resolved Total number of DNSSEC Verdicts Secure", nil, nil, ) resolvedTotalInsecure = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "dnssec_insecure_total"), "Resolved Total number of DNSSEC Verdicts Insecure", nil, nil, ) resolvedTotalBogus = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "dnssec_bogus_total"), "Resolved Total number of DNSSEC Verdicts Boguss", nil, nil, ) resolvedTotalIndeterminate = prometheus.NewDesc( prometheus.BuildFQName(namespace, subsystem, "dnssec_indeterminate_total"), "Resolved Total number of DNSSEC Verdicts Indeterminat", nil, nil, ) ) // NewCollector returns a new Collector exporing resolved statistics func NewCollector(logger *slog.Logger) (*Collector, error) { ctx := context.TODO() return &Collector{ ctx: ctx, logger: logger, }, nil } // Collect gathers metrics 
from resolved func (c *Collector) Collect(ch chan<- prometheus.Metric) { err := c.collect(ch) if err != nil { c.logger.Error("error collecting resolved metrics", "err", err.Error()) } } // Describe gathers descriptions of metrics func (c *Collector) Describe(desc chan<- *prometheus.Desc) { desc <- resolvedCurrentTransactions desc <- resolvedTotalTransactions desc <- resolvedCurrentCacheSize desc <- resolvedTotalCacheHits desc <- resolvedTotalCacheMisses desc <- resolvedTotalSecure desc <- resolvedTotalInsecure desc <- resolvedTotalBogus desc <- resolvedTotalIndeterminate } func parseProperty(object dbus.BusObject, path string) (ret []float64, err error) { variant, err := object.GetProperty(path) if err != nil { return nil, err } for _, v := range variant.Value().([]interface{}) { i := v.(uint64) ret = append(ret, float64(i)) } return ret, err } func (c *Collector) collect(ch chan<- prometheus.Metric) error { conn, err := dbus.ConnectSystemBus() if err != nil { return err } defer conn.Close() obj := conn.Object("org.freedesktop.resolve1", "/org/freedesktop/resolve1") cacheStats, err := parseProperty(obj, "org.freedesktop.resolve1.Manager.CacheStatistics") if err != nil { return err } ch <- prometheus.MustNewConstMetric(resolvedCurrentCacheSize, prometheus.GaugeValue, float64(cacheStats[0])) ch <- prometheus.MustNewConstMetric(resolvedTotalCacheHits, prometheus.CounterValue, float64(cacheStats[1])) ch <- prometheus.MustNewConstMetric(resolvedTotalCacheMisses, prometheus.CounterValue, float64(cacheStats[2])) transactionStats, err := parseProperty(obj, "org.freedesktop.resolve1.Manager.TransactionStatistics") ch <- prometheus.MustNewConstMetric(resolvedCurrentTransactions, prometheus.GaugeValue, float64(transactionStats[0])) ch <- prometheus.MustNewConstMetric(resolvedTotalTransactions, prometheus.CounterValue, float64(transactionStats[1])) if err != nil { return err } dnssecStats, err := parseProperty(obj, "org.freedesktop.resolve1.Manager.DNSSECStatistics") ch <- 
prometheus.MustNewConstMetric(resolvedTotalSecure, prometheus.CounterValue, float64(dnssecStats[0])) ch <- prometheus.MustNewConstMetric(resolvedTotalInsecure, prometheus.CounterValue, float64(dnssecStats[1])) ch <- prometheus.MustNewConstMetric(resolvedTotalBogus, prometheus.CounterValue, float64(dnssecStats[2])) ch <- prometheus.MustNewConstMetric(resolvedTotalIndeterminate, prometheus.CounterValue, float64(dnssecStats[3])) if err != nil { return err } return nil } systemd_exporter-0.7.0/systemd/systemd.go000066400000000000000000000704111476476532000206660ustar00rootroot00000000000000// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package systemd import ( "context" "fmt" "log/slog" "math" "strconv" // Register pprof-over-http handlers _ "net/http/pprof" "regexp" "strings" "sync" "time" kingpin "github.com/alecthomas/kingpin/v2" "github.com/coreos/go-systemd/v22/dbus" "github.com/prometheus/client_golang/prometheus" ) const namespace = "systemd" const watchdogSubsystem = "watchdog" var ( unitInclude = kingpin.Flag("systemd.collector.unit-include", "Regexp of systemd units to include. Units must both match include and not match exclude to be included.").Default(".+").String() unitExclude = kingpin.Flag("systemd.collector.unit-exclude", "Regexp of systemd units to exclude. 
Units must both match include and not match exclude to be included.").Default(".+\\.(device)").String() systemdPrivate = kingpin.Flag("systemd.collector.private", "Establish a private, direct connection to systemd without dbus.").Bool() systemdUser = kingpin.Flag("systemd.collector.user", "Connect to the user systemd instance.").Bool() enableRestartsMetrics = kingpin.Flag("systemd.collector.enable-restart-count", "Enables service restart count metrics. This feature only works with systemd 235 and above.").Bool() enableIPAccountingMetrics = kingpin.Flag("systemd.collector.enable-ip-accounting", "Enables service ip accounting metrics. This feature only works with systemd 235 and above.").Bool() ) var unitStatesName = []string{"active", "activating", "deactivating", "inactive", "failed"} var ( errGetPropertyMsg = "couldn't get unit's %s property: %w" errConvertUint64PropertyMsg = "couldn't convert unit's %s property %v to uint64" errConvertUint32PropertyMsg = "couldn't convert unit's %s property %v to uint32" errConvertStringPropertyMsg = "couldn't convert unit's %s property %v to string" errUnitMetricsMsg = "couldn't get unit's metrics: %s" infoUnitNoHandler = "no unit type handler for %s" ) type Collector struct { ctx context.Context logger *slog.Logger systemdBootMonotonic *prometheus.Desc systemdBootTime *prometheus.Desc unitCPUTotal *prometheus.Desc unitState *prometheus.Desc unitInfo *prometheus.Desc unitStartTimeDesc *prometheus.Desc unitTasksCurrentDesc *prometheus.Desc unitTasksMaxDesc *prometheus.Desc unitActiveEnterTimeDesc *prometheus.Desc unitActiveExitTimeDesc *prometheus.Desc unitInactiveEnterTimeDesc *prometheus.Desc unitInactiveExitTimeDesc *prometheus.Desc nRestartsDesc *prometheus.Desc timerLastTriggerDesc *prometheus.Desc socketAcceptedConnectionsDesc *prometheus.Desc socketCurrentConnectionsDesc *prometheus.Desc socketRefusedConnectionsDesc *prometheus.Desc ipIngressBytes *prometheus.Desc ipEgressBytes *prometheus.Desc ipIngressPackets 
*prometheus.Desc ipEgressPackets *prometheus.Desc watchdogEnabled *prometheus.Desc watchdogLastPingMonotonic *prometheus.Desc watchdogLastPingTimestamp *prometheus.Desc watchdogRuntimeSeconds *prometheus.Desc unitIncludePattern *regexp.Regexp unitExcludePattern *regexp.Regexp } // NewCollector returns a new Collector exposing systemd statistics. func NewCollector(logger *slog.Logger) (*Collector, error) { systemdBootMonotonic := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "boot_monotonic_seconds"), "Systemd boot stage monotonic timestamps", []string{"stage"}, nil, ) systemdBootTime := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "boot_time_seconds"), "Systemd boot stage timestamps", []string{"stage"}, nil, ) // Type is labeled twice e.g. name="foo.service" and type="service" to maintain compatibility // with users before we started exporting type label unitState := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_state"), "Systemd unit", []string{"name", "type", "state"}, nil, ) // TODO think about if we want to have 1) one unit_info metric which has all possible labels // for all possible unit type variables (at least, the relatively static ones that we care // about such as type, generated-vs-real-unit, etc). Cons: a) huge waste since all these labels // have to be set to foo="" on non-relevant types. b) accidental overloading (e.g. we have type // label, but it means something different for a service vs a mount. Right now it's impossible to // detangle that. // Option 1) is we have service_info, mount_info, target_info, etc. Many more metrics, but far fewer // wasted labels and little chance of semantic confusion. Our current codebase is not tuned for this, // we would be adding likt 30% more lines of just boilerplate to declare these different metrics // w.r.t. 
cardinality and performance, option 2 is slightly better performance due to smaller scrape payloads // but otherwise (1) and (2) seem similar unitInfo := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_info"), "Mostly-static metadata for all unit types", []string{"name", "type", "mount_type", "service_type"}, nil, ) unitStartTimeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_start_time_seconds"), "Start time of the unit since unix epoch in seconds.", []string{"name", "type"}, nil, ) unitTasksCurrentDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_tasks_current"), "Current number of tasks per Systemd unit", []string{"name"}, nil, ) unitTasksMaxDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_tasks_max"), "Maximum number of tasks per Systemd unit", []string{"name", "type"}, nil, ) unitActiveEnterTimeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_active_enter_time_seconds"), "Last time the unit transitioned into the active state", []string{"name", "type"}, nil, ) unitActiveExitTimeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_active_exit_time_seconds"), "Last time the unit transitioned out of the active state", []string{"name", "type"}, nil, ) unitInactiveEnterTimeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_inactive_enter_time_seconds"), "Last time the unit transitioned into the inactive state", []string{"name", "type"}, nil, ) unitInactiveExitTimeDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "unit_inactive_exit_time_seconds"), "Last time the unit transitioned out of the inactive state", []string{"name", "type"}, nil, ) nRestartsDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", "service_restart_total"), "Service unit count of Restart triggers", []string{"name"}, nil) timerLastTriggerDesc := prometheus.NewDesc( prometheus.BuildFQName(namespace, "", 
		"timer_last_trigger_seconds"),
		"Seconds since epoch of last trigger.", []string{"name"}, nil)
	socketAcceptedConnectionsDesc := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "socket_accepted_connections_total"),
		"Total number of accepted socket connections", []string{"name"}, nil)
	socketCurrentConnectionsDesc := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "socket_current_connections"),
		"Current number of socket connections", []string{"name"}, nil)
	socketRefusedConnectionsDesc := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "socket_refused_connections_total"),
		"Total number of refused socket connections", []string{"name"}, nil)
	// We could add a cpu label, but IMO that could cause a cardinality explosion. We already export
	// two modes per unit (user/system), and on a modest 4 core machine adding a cpu label would cause
	// us to export 8 metrics e.g. (2 modes * 4 cores) per enabled unit
	unitCPUTotal := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "unit_cpu_seconds_total"),
		"Unit CPU time in seconds",
		[]string{"name", "type", "mode"}, nil,
	)
	ipIngressBytes := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "service_ip_ingress_bytes"),
		"Service unit ingress IP accounting in bytes.",
		[]string{"name"}, nil,
	)
	ipEgressBytes := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "service_ip_egress_bytes"),
		"Service unit egress IP accounting in bytes.",
		[]string{"name"}, nil,
	)
	ipIngressPackets := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "service_ip_ingress_packets_total"),
		"Service unit ingress IP accounting in packets.",
		[]string{"name"}, nil,
	)
	ipEgressPackets := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "service_ip_egress_packets_total"),
		"Service unit egress IP accounting in packets.",
		[]string{"name"}, nil,
	)
	// Watchdog metrics live under their own subsystem prefix.
	watchdogEnabled := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, watchdogSubsystem, "enabled"),
		"systemd watchdog enabled",
		nil, nil,
	)
	watchdogLastPingMonotonic := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, watchdogSubsystem, "last_ping_monotonic_seconds"),
		"systemd watchdog last ping monotonic seconds",
		[]string{"device"}, nil,
	)
	watchdogLastPingTimestamp := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, watchdogSubsystem, "last_ping_time_seconds"),
		"systemd watchdog last ping time seconds",
		[]string{"device"}, nil,
	)
	watchdogRuntimeSeconds := prometheus.NewDesc(
		prometheus.BuildFQName(namespace, watchdogSubsystem, "runtime_seconds"),
		"systemd watchdog runtime seconds",
		[]string{"device"}, nil,
	)
	// Anchor the user-supplied patterns so they must match the whole unit
	// name. MustCompile will panic on an invalid flag value at startup.
	unitIncludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitInclude))
	unitExcludePattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *unitExclude))
	// TODO: Build a custom handler to pass in the scrape http context.
	ctx := context.TODO()
	return &Collector{
		ctx:                           ctx,
		logger:                        logger,
		systemdBootMonotonic:          systemdBootMonotonic,
		systemdBootTime:               systemdBootTime,
		unitCPUTotal:                  unitCPUTotal,
		unitState:                     unitState,
		unitInfo:                      unitInfo,
		unitStartTimeDesc:             unitStartTimeDesc,
		unitTasksCurrentDesc:          unitTasksCurrentDesc,
		unitTasksMaxDesc:              unitTasksMaxDesc,
		unitActiveEnterTimeDesc:       unitActiveEnterTimeDesc,
		unitActiveExitTimeDesc:        unitActiveExitTimeDesc,
		unitInactiveEnterTimeDesc:     unitInactiveEnterTimeDesc,
		unitInactiveExitTimeDesc:      unitInactiveExitTimeDesc,
		nRestartsDesc:                 nRestartsDesc,
		timerLastTriggerDesc:          timerLastTriggerDesc,
		socketAcceptedConnectionsDesc: socketAcceptedConnectionsDesc,
		socketCurrentConnectionsDesc:  socketCurrentConnectionsDesc,
		socketRefusedConnectionsDesc:  socketRefusedConnectionsDesc,
		ipIngressBytes:                ipIngressBytes,
		ipEgressBytes:                 ipEgressBytes,
		ipIngressPackets:              ipIngressPackets,
		ipEgressPackets:               ipEgressPackets,
		unitIncludePattern:            unitIncludePattern,
		unitExcludePattern:            unitExcludePattern,
		watchdogEnabled:               watchdogEnabled,
		watchdogLastPingMonotonic:     watchdogLastPingMonotonic,
		watchdogLastPingTimestamp:     watchdogLastPingTimestamp,
		watchdogRuntimeSeconds:
watchdogRuntimeSeconds, }, nil } // Collect gathers metrics from systemd. func (c *Collector) Collect(ch chan<- prometheus.Metric) { err := c.collect(ch) if err != nil { c.logger.Error("error collecting metrics", "err", err.Error()) } } // Describe gathers descriptions of Metrics func (c *Collector) Describe(desc chan<- *prometheus.Desc) { desc <- c.systemdBootMonotonic desc <- c.systemdBootTime desc <- c.unitCPUTotal desc <- c.unitState desc <- c.unitInfo desc <- c.unitStartTimeDesc desc <- c.unitTasksCurrentDesc desc <- c.unitTasksMaxDesc desc <- c.nRestartsDesc desc <- c.timerLastTriggerDesc desc <- c.socketAcceptedConnectionsDesc desc <- c.socketCurrentConnectionsDesc desc <- c.socketRefusedConnectionsDesc desc <- c.ipIngressBytes desc <- c.ipEgressBytes desc <- c.ipIngressPackets desc <- c.ipEgressPackets desc <- c.watchdogEnabled desc <- c.watchdogLastPingMonotonic desc <- c.watchdogLastPingTimestamp desc <- c.watchdogRuntimeSeconds } func parseUnitType(unit dbus.UnitStatus) string { t := strings.Split(unit.Name, ".") return t[len(t)-1] } func (c *Collector) collect(ch chan<- prometheus.Metric) error { begin := time.Now() conn, err := c.newDbus() if err != nil { return fmt.Errorf("couldn't get dbus connection: %w", err) } defer conn.Close() err = c.collectBootStageTimestamps(conn, ch) if err != nil { c.logger.Debug("Failed to collect boot stage timestamps", "err", err.Error()) } err = c.collectWatchdogMetrics(conn, ch) if err != nil { c.logger.Debug("Failed to collect watchdog metrics", "err", err.Error()) } allUnits, err := conn.ListUnitsContext(c.ctx) if err != nil { return fmt.Errorf("could not get list of systemd units from dbus: %w", err) } c.logger.Debug("systemd ListUnits took", "seconds", time.Since(begin).Seconds()) begin = time.Now() units := c.filterUnits(allUnits, c.unitIncludePattern, c.unitExcludePattern) c.logger.Debug("systemd filterUnits took", "seconds", time.Since(begin).Seconds()) var wg sync.WaitGroup wg.Add(len(units)) for _, unit := 
range units {
		// One goroutine per unit; per-unit failures are logged rather than
		// aborting the whole scrape.
		go func(unit dbus.UnitStatus) {
			err := c.collectUnit(conn, ch, unit)
			if err != nil {
				c.logger.Warn(errUnitMetricsMsg, "err", err.Error())
			}
			wg.Done()
		}(unit)
	}
	wg.Wait()
	return nil
}

// collectBootStageTimestamps exports the systemd manager's per-boot-stage
// timestamps as gauges labeled by stage, both monotonic and wall-clock.
func (c *Collector) collectBootStageTimestamps(conn *dbus.Conn, ch chan<- prometheus.Metric) error {
	stages := []string{"Finish", "Firmware", "Loader", "Kernel", "InitRD",
		"InitRDGeneratorsStart", "InitRDGeneratorsFinish", "InitRDSecurityStart",
		"InitRDSecurityFinish", "InitRDUnitsLoadStart", "InitRDUnitsLoadFinish",
		"GeneratorsStart", "GeneratorsFinish", "SecurityStart", "SecurityFinish",
		"Userspace", "UnitsLoadStart", "UnitsLoadFinish"}
	for _, stage := range stages {
		stageMonotonicValue, err := conn.GetManagerProperty(fmt.Sprintf("%sTimestampMonotonic", stage))
		if err != nil {
			return err
		}
		stageTimestampValue, err := conn.GetManagerProperty(fmt.Sprintf("%sTimestamp", stage))
		if err != nil {
			return err
		}
		// Property values arrive as quoted variant strings (apparently of the
		// form `"@t <usec>"`, given the TrimLeft below); strip the surrounding
		// quotes and the type prefix before parsing.
		stageMonotonic := strings.TrimPrefix(strings.TrimSuffix(stageMonotonicValue, `"`), `"`)
		stageTimestamp := strings.TrimPrefix(strings.TrimSuffix(stageTimestampValue, `"`), `"`)
		vMonotonic, err := strconv.ParseFloat(strings.TrimLeft(stageMonotonic, "@t "), 64)
		if err != nil {
			return err
		}
		vTimestamp, err := strconv.ParseFloat(strings.TrimLeft(stageTimestamp, "@t "), 64)
		if err != nil {
			return err
		}
		// Raw values are microseconds; export as seconds.
		ch <- prometheus.MustNewConstMetric(
			c.systemdBootMonotonic, prometheus.GaugeValue, float64(vMonotonic)/1e6, stage)
		ch <- prometheus.MustNewConstMetric(
			c.systemdBootTime, prometheus.GaugeValue, float64(vTimestamp)/1e6, stage)
	}
	return nil
}

// collectUnit gathers the generic unit metrics, then dispatches to
// type-specific collectors based on the unit name's suffix.
func (c *Collector) collectUnit(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	logger := c.logger.With("unit", unit.Name)
	// Collect unit_state for all
	err := c.collectUnitState(ch, unit)
	if err != nil {
		logger.Warn(errUnitMetricsMsg, "err", err.Error())
		// TODO should we continue processing here?
}
	err = c.collectUnitTimeMetrics(conn, ch, unit)
	if err != nil {
		logger.Warn(errUnitMetricsMsg, "err", err.Error())
	}
	// Type-specific metrics; units with no matching suffix only get the
	// generic state/time metrics collected above. Per-collector errors are
	// logged and do not stop the remaining collectors for this unit.
	switch {
	case strings.HasSuffix(unit.Name, ".service"):
		err = c.collectServiceMetainfo(conn, ch, unit)
		if err != nil {
			logger.Warn(errUnitMetricsMsg, "err", err.Error())
		}
		err = c.collectServiceStartTimeMetrics(conn, ch, unit)
		if err != nil {
			logger.Warn(errUnitMetricsMsg, "err", err.Error())
		}
		if *enableRestartsMetrics {
			err = c.collectServiceRestartCount(conn, ch, unit)
			if err != nil {
				logger.Warn(errUnitMetricsMsg, "err", err.Error())
			}
		}
		err = c.collectServiceTasksMetrics(conn, ch, unit)
		if err != nil {
			logger.Warn(errUnitMetricsMsg, "err", err.Error())
		}
		if *enableIPAccountingMetrics {
			err = c.collectIPAccountingMetrics(conn, ch, unit)
			if err != nil {
				logger.Warn(errUnitMetricsMsg, "err", err.Error())
			}
		}
	case strings.HasSuffix(unit.Name, ".mount"):
		err = c.collectMountMetainfo(conn, ch, unit)
		if err != nil {
			logger.Warn(errUnitMetricsMsg, "err", err.Error())
		}
	case strings.HasSuffix(unit.Name, ".timer"):
		err := c.collectTimerTriggerTime(conn, ch, unit)
		if err != nil {
			logger.Warn(errUnitMetricsMsg, "err", err.Error())
		}
	case strings.HasSuffix(unit.Name, ".socket"):
		err := c.collectSocketConnMetrics(conn, ch, unit)
		if err != nil {
			logger.Warn(errUnitMetricsMsg, "err", err.Error())
		}
	default:
		logger.Debug(infoUnitNoHandler, "unit", unit.Name)
	}
	return nil
}

// collectUnitState emits one unit_state gauge per known state name, set to 1
// for the unit's current ActiveState and 0 for every other state.
func (c *Collector) collectUnitState(ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	// TODO: wrap GetUnitTypePropertyString(
	// serviceTypeProperty, err := conn.GetUnitTypeProperty(unit.Name, "Timer", "NextElapseUSecMonotonic")
	for _, stateName := range unitStatesName {
		isActive := 0.0
		if stateName == unit.ActiveState {
			isActive = 1.0
		}
		ch <- prometheus.MustNewConstMetric(
			c.unitState, prometheus.GaugeValue, isActive,
			unit.Name, parseUnitType(unit), stateName)
	}
	return nil
}

// collectUnitTimeMetrics exports the unit's four state-transition timestamps.
func (c *Collector) collectUnitTimeMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, unit
dbus.UnitStatus) error {
	err := c.collectUnitTimeMetric(conn, ch, unit, c.unitActiveEnterTimeDesc, "ActiveEnterTimestamp")
	if err != nil {
		return err
	}
	err = c.collectUnitTimeMetric(conn, ch, unit, c.unitActiveExitTimeDesc, "ActiveExitTimestamp")
	if err != nil {
		return err
	}
	err = c.collectUnitTimeMetric(conn, ch, unit, c.unitInactiveEnterTimeDesc, "InactiveEnterTimestamp")
	if err != nil {
		return err
	}
	err = c.collectUnitTimeMetric(conn, ch, unit, c.unitInactiveExitTimeDesc, "InactiveExitTimestamp")
	if err != nil {
		return err
	}
	return nil
}

// collectUnitTimeMetric reads one microsecond-resolution uint64 timestamp
// property from the unit and exports it via desc as a gauge in seconds.
func (c *Collector) collectUnitTimeMetric(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus, desc *prometheus.Desc, propertyName string) error {
	timestampValue, err := conn.GetUnitPropertyContext(c.ctx, unit.Name, propertyName)
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, propertyName, err)
	}
	startTimeUsec, ok := timestampValue.Value.Value().(uint64)
	if !ok {
		return fmt.Errorf(errConvertUint64PropertyMsg, propertyName, timestampValue.Value.Value())
	}
	ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(startTimeUsec)/1e6, unit.Name, parseUnitType(unit))
	return nil
}

// TODO metric is named unit but function is "Mount"
func (c *Collector) collectMountMetainfo(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	// TODO: wrap GetUnitTypePropertyString(
	serviceTypeProperty, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Mount", "Type")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "Type", err)
	}
	serviceType, ok := serviceTypeProperty.Value.Value().(string)
	if !ok {
		return fmt.Errorf(errConvertStringPropertyMsg, "Type", serviceTypeProperty.Value.Value())
	}
	// unit_info labels are (name, type, mount_type, service_type); mounts
	// fill mount_type and leave service_type empty.
	ch <- prometheus.MustNewConstMetric(
		c.unitInfo, prometheus.GaugeValue, 1.0,
		unit.Name, parseUnitType(unit), serviceType, "")
	return nil
}

// TODO the metric is named unit_info but function is named "Service"
func (c *Collector) collectServiceMetainfo(conn *dbus.Conn, ch chan<- prometheus.Metric,
unit dbus.UnitStatus) error {
	serviceTypeProperty, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Service", "Type")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "Type", err)
	}
	serviceType, ok := serviceTypeProperty.Value.Value().(string)
	if !ok {
		return fmt.Errorf(errConvertStringPropertyMsg, "Type", serviceTypeProperty.Value.Value())
	}
	// unit_info labels are (name, type, mount_type, service_type); services
	// fill service_type and leave mount_type empty.
	ch <- prometheus.MustNewConstMetric(
		c.unitInfo, prometheus.GaugeValue, 1.0,
		unit.Name, parseUnitType(unit), "", serviceType)
	return nil
}

// collectServiceRestartCount exports the service's NRestarts property as the
// service_restart_total counter.
func (c *Collector) collectServiceRestartCount(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	restartsCount, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Service", "NRestarts")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "NRestarts", err)
	}
	val, ok := restartsCount.Value.Value().(uint32)
	if !ok {
		return fmt.Errorf(errConvertUint32PropertyMsg, "NRestarts", restartsCount.Value.Value())
	}
	ch <- prometheus.MustNewConstMetric(
		c.nRestartsDesc, prometheus.CounterValue,
		float64(val), unit.Name)
	return nil
}

// TODO metric is named unit but function is "Service"
func (c *Collector) collectServiceStartTimeMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	var startTimeUsec uint64
	switch unit.ActiveState {
	case "active":
		timestampValue, err := conn.GetUnitPropertyContext(c.ctx, unit.Name, "ActiveEnterTimestamp")
		if err != nil {
			return fmt.Errorf(errGetPropertyMsg, "ActiveEnterTimestamp", err)
		}
		startTime, ok := timestampValue.Value.Value().(uint64)
		if !ok {
			return fmt.Errorf(errConvertUint64PropertyMsg, "ActiveEnterTimestamp", timestampValue.Value.Value())
		}
		startTimeUsec = startTime
	default:
		// Non-active units report a start time of 0.
		startTimeUsec = 0
	}
	ch <- prometheus.MustNewConstMetric(
		c.unitStartTimeDesc, prometheus.GaugeValue,
		float64(startTimeUsec)/1e6, unit.Name, parseUnitType(unit))
	return nil
}

// collectSocketConnMetrics exports accepted/current/refused connection counts
// for a socket unit.
func (c *Collector) collectSocketConnMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
acceptedConnectionCount, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Socket", "NAccepted")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "NAccepted", err)
	}
	// BUGFIX: the three uint32 assertions in this function were previously
	// unchecked and would panic on an unexpected dbus variant type; use the
	// checked-conversion style every other collector in this file uses.
	accepted, ok := acceptedConnectionCount.Value.Value().(uint32)
	if !ok {
		return fmt.Errorf(errConvertUint32PropertyMsg, "NAccepted", acceptedConnectionCount.Value.Value())
	}
	ch <- prometheus.MustNewConstMetric(
		c.socketAcceptedConnectionsDesc, prometheus.CounterValue,
		float64(accepted), unit.Name)

	currentConnectionCount, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Socket", "NConnections")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "NConnections", err)
	}
	current, ok := currentConnectionCount.Value.Value().(uint32)
	if !ok {
		return fmt.Errorf(errConvertUint32PropertyMsg, "NConnections", currentConnectionCount.Value.Value())
	}
	ch <- prometheus.MustNewConstMetric(
		c.socketCurrentConnectionsDesc, prometheus.GaugeValue,
		float64(current), unit.Name)

	// NRefused wasn't added until systemd 239.
	refusedConnectionCount, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Socket", "NRefused")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "NRefused", err)
	}
	refused, ok := refusedConnectionCount.Value.Value().(uint32)
	if !ok {
		return fmt.Errorf(errConvertUint32PropertyMsg, "NRefused", refusedConnectionCount.Value.Value())
	}
	ch <- prometheus.MustNewConstMetric(
		c.socketRefusedConnectionsDesc, prometheus.GaugeValue,
		float64(refused), unit.Name)
	return nil
}

// collectIPAccountingMetrics exports the four IP accounting counters for a
// service unit (presumably only meaningful when IPAccounting is enabled on
// the unit — TODO confirm what dbus reports otherwise).
func (c *Collector) collectIPAccountingMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	unitPropertyToPromDesc := map[string]*prometheus.Desc{
		"IPIngressBytes":   c.ipIngressBytes,
		"IPEgressBytes":    c.ipEgressBytes,
		"IPIngressPackets": c.ipIngressPackets,
		"IPEgressPackets":  c.ipEgressPackets,
	}
	for propertyName, desc := range unitPropertyToPromDesc {
		property, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Service", propertyName)
		if err != nil {
			return fmt.Errorf(errGetPropertyMsg, propertyName, err)
		}
		counter, ok := property.Value.Value().(uint64)
		if !ok {
			return fmt.Errorf(errConvertUint64PropertyMsg, propertyName,
				property.Value.Value())
		}
		ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue,
			float64(counter), unit.Name)
	}
	return nil
}

// TODO either the unit should be called service_tasks, or it should work for all
// units.
// It's currently named unit_task
func (c *Collector) collectServiceTasksMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	tasksCurrentCount, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Service", "TasksCurrent")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "TasksCurrent", err)
	}
	currentCount, ok := tasksCurrentCount.Value.Value().(uint64)
	if !ok {
		return fmt.Errorf(errConvertUint64PropertyMsg, "TasksCurrent", tasksCurrentCount.Value.Value())
	}
	// Don't set tasksCurrent if dbus reports MaxUint64 (presumably the
	// sentinel for "not set" — TODO confirm against systemd docs).
	if currentCount != math.MaxUint64 {
		ch <- prometheus.MustNewConstMetric(
			c.unitTasksCurrentDesc, prometheus.GaugeValue,
			float64(currentCount), unit.Name)
	}
	tasksMaxCount, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Service", "TasksMax")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "TasksMax", err)
	}
	maxCount, ok := tasksMaxCount.Value.Value().(uint64)
	if !ok {
		return fmt.Errorf(errConvertUint64PropertyMsg, "TasksMax", tasksMaxCount.Value.Value())
	}
	// Don't set tasksMax if dbus reports MaxUint64.
if maxCount != math.MaxUint64 {
		ch <- prometheus.MustNewConstMetric(
			c.unitTasksMaxDesc, prometheus.GaugeValue,
			float64(maxCount), unit.Name, parseUnitType(unit))
	}
	return nil
}

// collectTimerTriggerTime exports the timer unit's LastTriggerUSec property
// (microseconds since epoch) as timer_last_trigger_seconds.
func (c *Collector) collectTimerTriggerTime(conn *dbus.Conn, ch chan<- prometheus.Metric, unit dbus.UnitStatus) error {
	lastTriggerValue, err := conn.GetUnitTypePropertyContext(c.ctx, unit.Name, "Timer", "LastTriggerUSec")
	if err != nil {
		return fmt.Errorf(errGetPropertyMsg, "LastTriggerUSec", err)
	}
	val, ok := lastTriggerValue.Value.Value().(uint64)
	if !ok {
		return fmt.Errorf(errConvertUint64PropertyMsg, "LastTriggerUSec", lastTriggerValue.Value.Value())
	}
	ch <- prometheus.MustNewConstMetric(
		c.timerLastTriggerDesc, prometheus.GaugeValue,
		float64(val)/1e6, unit.Name)
	return nil
}

// newDbus opens a dbus connection according to the configured flags:
// the private systemd socket, the per-user manager, or the default bus.
func (c *Collector) newDbus() (*dbus.Conn, error) {
	if *systemdPrivate {
		return dbus.NewSystemdConnectionContext(c.ctx)
	}
	if *systemdUser {
		return dbus.NewUserConnectionContext(c.ctx)
	}
	return dbus.NewWithContext(c.ctx)
}

// filterUnits returns the units that match the include pattern, do not match
// the exclude pattern, and are currently loaded.
func (c *Collector) filterUnits(units []dbus.UnitStatus, includePattern, excludePattern *regexp.Regexp) []dbus.UnitStatus {
	filtered := make([]dbus.UnitStatus, 0, len(units))
	for _, unit := range units {
		if includePattern.MatchString(unit.Name) && !excludePattern.MatchString(unit.Name) && unit.LoadState == "loaded" {
			c.logger.Debug("Adding unit", "unit", unit.Name)
			filtered = append(filtered, unit)
		} else {
			c.logger.Debug("Ignoring unit", "unit", unit.Name)
		}
	}
	return filtered
}

// collectWatchdogMetrics exports watchdog device metrics; when no watchdog
// device is configured it emits enabled=0 and returns early.
func (c *Collector) collectWatchdogMetrics(conn *dbus.Conn, ch chan<- prometheus.Metric) error {
	watchdogDevice, err := conn.GetManagerProperty("WatchdogDevice")
	if err != nil {
		return err
	}
	// Manager properties come back as quoted strings; strip the quotes.
	watchdogDeviceString := strings.TrimPrefix(strings.TrimSuffix(watchdogDevice, `"`), `"`)
	if len(watchdogDeviceString) == 0 {
		ch <- prometheus.MustNewConstMetric(
			c.watchdogEnabled, prometheus.GaugeValue, 0)
		c.logger.Debug("No watchdog configured, ignoring metrics")
		return nil
	}
	watchdogLastPingMonotonicProperty, err :=
conn.GetManagerProperty("WatchdogLastPingTimestampMonotonic")
	if err != nil {
		return err
	}
	watchdogLastPingTimeProperty, err := conn.GetManagerProperty("WatchdogLastPingTimestamp")
	if err != nil {
		return err
	}
	runtimeWatchdogUSecProperty, err := conn.GetManagerProperty("RuntimeWatchdogUSec")
	if err != nil {
		return err
	}
	// Values appear to be "@t <usec>" variant strings (hence the TrimLeft);
	// strip the prefix and parse the microsecond count.
	watchdogLastPingMonotonic, err := strconv.ParseFloat(strings.TrimLeft(watchdogLastPingMonotonicProperty, "@t "), 64)
	if err != nil {
		return err
	}
	watchdogLastPingTimestamp, err := strconv.ParseFloat(strings.TrimLeft(watchdogLastPingTimeProperty, "@t "), 64)
	if err != nil {
		return err
	}
	runtimeWatchdogUSec, err := strconv.ParseFloat(strings.TrimLeft(runtimeWatchdogUSecProperty, "@t "), 64)
	if err != nil {
		return err
	}
	ch <- prometheus.MustNewConstMetric(
		c.watchdogEnabled, prometheus.GaugeValue, 1)
	// Microsecond values are exported as seconds.
	ch <- prometheus.MustNewConstMetric(
		c.watchdogLastPingMonotonic, prometheus.GaugeValue, float64(watchdogLastPingMonotonic)/1e6, watchdogDeviceString)
	ch <- prometheus.MustNewConstMetric(
		c.watchdogLastPingTimestamp, prometheus.GaugeValue, float64(watchdogLastPingTimestamp)/1e6, watchdogDeviceString)
	ch <- prometheus.MustNewConstMetric(
		c.watchdogRuntimeSeconds, prometheus.GaugeValue, float64(runtimeWatchdogUSec)/1e6, watchdogDeviceString)
	return nil
}
systemd_exporter-0.7.0/test_image.sh000077500000000000000000000011511476476532000176320ustar00rootroot00000000000000#!/bin/bash
# Smoke test: run the given exporter image and poll /metrics until it responds.
set -exo pipefail

docker_image=$1
port=$2
container_id=''

# Poll the metrics endpoint for up to ~10 attempts; clean up and exit 0 on
# success.
# NOTE(review): the failure path exits 1 without calling docker_cleanup,
# leaving the container running — presumably unintended; confirm before
# relying on this in CI.
wait_start() {
    for in in {1..10}; do
        if /usr/bin/curl -s -m 5 -f "http://localhost:${port}/metrics" > /dev/null; then
            docker_cleanup
            exit 0
        else
            sleep 1
        fi
    done
    exit 1
}

# Start the container detached, publishing the exporter port.
docker_start() {
    container_id=$(docker run -d -p "${port}":"${port}" "${docker_image}")
}

docker_cleanup() {
    docker kill "${container_id}"
}

if [[ "$#" -ne 2 ]] ; then
    echo "Usage: $0 quay.io/prometheuscommunity/systemd-exporter:v0.2.0 9558" >&2
    exit 1
fi

docker_start
wait_start