pax_global_header00006660000000000000000000000064144242307650014521gustar00rootroot0000000000000052 comment=fecd28b250d7c028fdacf2b57b39afb1e5a5a83f prometheus-client-mmap-v0.23.1/000077500000000000000000000000001442423076500163715ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/.clang-format000066400000000000000000000000651442423076500207450ustar00rootroot00000000000000BasedOnStyle: Google IndentWidth: 4 ColumnLimit: 120 prometheus-client-mmap-v0.23.1/.coveralls.yml000066400000000000000000000000301442423076500211550ustar00rootroot00000000000000service_name: travis-ci prometheus-client-mmap-v0.23.1/.gitignore000066400000000000000000000002551442423076500203630ustar00rootroot00000000000000coverage/ Gemfile.lock pkg/ prometheus-client-mmap*.gem tmp/ lib/*.bundle lib/*.so ext/fast_mmaped_file/jsmn.c ext/fast_mmaped_file/hashmap.c ext/fast_mmaped_file_rs/target prometheus-client-mmap-v0.23.1/.gitlab-ci.yml000066400000000000000000000147411442423076500210340ustar00rootroot00000000000000stages: - test - build_gems - smoke_test - deploy - release .install-rust: &install-rust - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --quiet --default-toolchain=1.65.0 --profile=minimal - source "$HOME/.cargo/env" .install-ruby: &install-ruby - ruby -v - gem update --no-document --system - bundle --version - bundle config --local path vendor - bundle install -j $(nproc) .install-ruby-and-compile: &install-ruby-and-compile - *install-ruby - bundle exec rake compile cache: paths: - vendor/ruby before_script: - apt-get update - apt-get install -y curl ruby ruby-dev build-essential llvm-dev libclang-dev clang - *install-rust - *install-ruby-and-compile .test-job: &test-job image: ruby:${RUBY_VERSION} variables: prometheus_multiproc_dir: tmp/ BUILDER_IMAGE_REVISION: "4.9.1" RUBY_VERSION: "2.7" script: - bundle exec rake spec - cd ext/fast_mmaped_file_rs && cargo nextest run artifacts: paths: - coverage/ ruby: <<: *test-job before_script: - apt-get update - apt-get install -y llvm-dev libclang-dev clang - *install-rust - *install-ruby-and-compile - curl -LsSf https://get.nexte.st/0.9/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin parallel: matrix: - RUBY_VERSION: ["2.7", "3.0", "3.1", "3.2"] builder:centos_7: <<: *test-job before_script: - source /opt/rh/llvm-toolset-7/enable - *install-ruby-and-compile script: - bundle exec rake spec image: registry.gitlab.com/gitlab-org/gitlab-omnibus-builder/centos_7:${BUILDER_IMAGE_REVISION} cache: key: "centos_7" paths: - vendor/ruby builder:centos_8: <<: *test-job before_script: - *install-ruby-and-compile script: - bundle exec rake spec image: registry.gitlab.com/gitlab-org/gitlab-omnibus-builder/centos_8:${BUILDER_IMAGE_REVISION} cache: key: "centos_8" paths: - vendor/ruby i386/debian:bullseye: <<: *test-job before_script: - apt-get update - apt-get install -y curl ruby ruby-dev build-essential llvm-dev libclang-dev clang - *install-rust - *install-ruby-and-compile - curl -LsSf https://get.nexte.st/0.9/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin - export LANG=C.UTF-8 script: - bundle exec rake spec cache: key: "i386-debian-bullseye" paths: - vendor/ruby archlinux:ruby:2.5: <<: *test-job image: archlinux/archlinux:base before_script: - pacman -Sy --noconfirm git gcc clang make ruby ruby-bundler ruby-rdoc ruby-rake which grep gawk procps-ng - *install-rust - *install-ruby-and-compile - curl -LsSf https://get.nexte.st/0.9/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin script: - bundle exec rake spec cache: key: "archlinux-ruby2.5" 
paths: - vendor/ruby fuzzbert: image: ruby:2.7 variables: prometheus_multiproc_dir: tmp/ script: - bundle exec fuzzbert fuzz/**/fuzz_*.rb --handler PrintAndExitHandler --limit 10000 clang-format check: image: debian:bullseye-slim before_script: - apt-get update - apt-get install -y clang-format git script: - find ext/ -name '*.[ch]' | xargs clang-format -style=file -i - git diff --exit-code rustfmt check: image: debian:bullseye-slim before_script: - apt-get update - apt-get install -y curl - *install-rust - rustup component add rustfmt script: - cargo fmt --manifest-path ext/fast_mmaped_file_rs/Cargo.toml -- --check rspec Address-Sanitizer: image: debian:bullseye-slim before_script: - apt-get update - apt-get install -y git libasan5 ruby ruby-dev gem build-essential curl llvm-dev libclang-dev clang - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --quiet --default-toolchain=nightly-2022-11-03 --profile=minimal - source "$HOME/.cargo/env" - rustup component add rust-src - *install-ruby allow_failure: true script: - RB_SYS_EXTRA_CARGO_ARGS='-Zbuild-std' CARGO_BUILD_TARGET=x86_64-unknown-linux-gnu rake compile -- --enable-address-sanitizer - export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libasan.so.5 - export ASAN_OPTIONS=atexit=true:verbosity=1 - bundle exec rspec > /dev/null parsing Address-Sanitizer: image: debian:bullseye-slim before_script: - apt-get update - apt-get install -y git libasan5 ruby ruby-dev gem build-essential curl llvm-dev libclang-dev clang - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --quiet --default-toolchain=nightly-2022-11-03 --profile=minimal - source "$HOME/.cargo/env" - rustup component add rust-src - *install-ruby-and-compile allow_failure: true script: - bundle exec rspec - RB_SYS_EXTRA_CARGO_ARGS='-Zbuild-std' CARGO_BUILD_TARGET=x86_64-unknown-linux-gnu rake compile -- --enable-address-sanitizer - DB_FILE=$(basename $(find tmp -name '*.db' | head -n 1)) - test -n $DB_FILE - for ((i=1;i<=100;i++)); do cp tmp/$DB_FILE tmp/$i$DB_FILE; done - export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libasan.so.5 - export ASAN_OPTIONS=atexit=true:verbosity=1 - export LSAN_OPTIONS=suppressions=known-leaks-suppression.txt - bundle exec bin/parse -t tmp/*.db > /dev/null pages: image: ruby:2.7 stage: deploy dependencies: - "ruby: [2.7]" script: - mv coverage/ public/ artifacts: paths: - public expire_in: 30 days only: - master gems: image: docker:20.10.16 services: - docker:20.10.16-dind stage: build_gems needs: [] variables: DOCKER_HOST: tcp://docker:2375 DOCKER_TLS_CERTDIR: "" before_script: - apk add ruby ruby-dev git make gcc g++ - *install-ruby script: - bundle exec rake gem:${TARGET_PLATFORM} parallel: matrix: - TARGET_PLATFORM: ["aarch64-linux", "arm64-darwin", "x86_64-darwin", "x86_64-linux"] artifacts: paths: - pkg/*.gem cache: key: "gem-build-${CI_JOB_IMAGE}" paths: - vendor/ruby only: - tags gem_smoke_test: stage: build_gems image: ruby:3.1 needs: ["gems: [x86_64-linux]"] before_script: - ruby -v script: - gem install pkg/prometheus-client-mmap-*-x86_64-linux.gem - echo "Checking if Rust extension is available..." - ruby -r 'prometheus/client/formats/text' -e "exit 1 unless Prometheus::Client::Formats::Text.rust_impl_available?" 
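# No bundler cache needed here: this job only installs the freshly built x86_64-linux gem and checks that the Rust extension loads.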
cache: [] only: - tags release: stage: release image: ruby:3.1 before_script: - gem -v script: - ls -al pkg/*.gem - tools/deploy-rubygem.sh cache: [] only: - tags prometheus-client-mmap-v0.23.1/.rubocop.yml000066400000000000000000000004751442423076500206510ustar00rootroot00000000000000AllCops: TargetRubyVersion: 2.2 Exclude: - lib/prometheus/client/version.rb - tmp AlignHash: EnforcedHashRocketStyle: table Style/TrailingCommaInArguments: EnforcedStyleForMultiline: comma Style/TrailingCommaInLiteral: EnforcedStyleForMultiline: comma Metrics/AbcSize: Max: 18 prometheus-client-mmap-v0.23.1/.tool-versions000066400000000000000000000000141442423076500212100ustar00rootroot00000000000000rust 1.65.0 prometheus-client-mmap-v0.23.1/.travis.yml000066400000000000000000000001201442423076500204730ustar00rootroot00000000000000sudo: false language: ruby rvm: - 1.9.3 - 2.2.5 - 2.3.1 - jruby-9.0.5.0 prometheus-client-mmap-v0.23.1/AUTHORS.md000066400000000000000000000012041442423076500200350ustar00rootroot00000000000000The Prometheus project was started by Matt T. Proud (emeritus) and Julius Volz in 2012. Maintainers of this repository: * Tobias Schmidt The following individuals have contributed code to this repository (listed in alphabetical order): * Andrea Bernardo Ciddio * Bjoern Rabenstein * Dmitry Krasnoukhov * Julius Volz * Kirk Russell * Patrick Ellis * Roland Rifandi Utama * Sergio Gil * Tobias Schmidt prometheus-client-mmap-v0.23.1/CHANGELOG.md000066400000000000000000000051071442423076500202050ustar00rootroot00000000000000## v0.23.1 - Use c_long for Ruby string length !109 ## v0.23.0 - Drop musl precompiled gem and relax RubyGems dependency !106 ## v0.22.0 - Re-implement write path in Rust !103 ## v0.21.0 - Remove 'rustc' check from 'Rakefile' !97 - Add support for precompiled gems !99 - Refactor 'bin/setup' to remove uninitialized vars !100 - ci: create precompiled gems and push to Rubygems automatically !101 - Require RubyGems >= v3.3.22 !101 ## v0.20.3 - Check for 'rustc' in 'extconf.rb' !95 ## v0.20.2 - Allocate EntryMap keys only when needed !92 - Don't auto-install Rust toolchain on 'gem install' !93 ## v0.20.1 - Install Rust extension to 'lib' !90 ## v0.20.0 - Use system page size !84 - Implement 'to_metrics' in Rust !85 ## v0.19.1 - No changes; v0.19.0 gem pulled in some unnecessary files. ## v0.19.0 - Fix seg fault after memory is unmapped !80 ## v0.18.0 - pushgateway: add grouping_key feature !76 ## v0.17.0 - Fix crash when trying to inspect all strings !74 ## v0.16.2 - No code changes. Retagging due to extraneous file included in package. ## v0.16.1 - Improve LabelSetValidator debug messages !69 - Properly rescue Oj exceptions !70 ## v0.16.0 - Make sure on reset we release file descriptors for open files !63 ## v0.15.0 - Make labels order independent !60 ## v0.14.0 - Remove deprecated taint mechanism logic !59 ## v0.13.0 - Gauge: add decrement method to gauge metric type !57 - Update push.rb to use newest client_ruby code !56 ## v0.12.0 - Remove deprecated rb_safe_level() and rb_secure() calls !53 ## v0.11.0 - Include filename in IOError exception !47 - Fix clang-format violations !49 - CI: use libasan5 !50 - Truncate MmappedDict#inspect output !51 ## v0.10.0 - Eliminate SIGBUS errors in parsing metrics !43 - Make it easier to diagnose clang-format failures !44 - Add Ruby 2.6 and 2.7 to CI builds !45 ## v0.9.10 - Extend `Prometheus::Client.reinitialize_on_pid_change` method to receive `:force` param !40 With `force: true` it reinitializes all metrics files. 
With `force: false` (default) it reinitializes only on changed PID (as it was before). In any case, it keeps the registry (as it was before). Thus, the change is backward compatible. ## v0.9.9 - Do not allow label values that will corrupt the metrics !38 ## v0.9.7 - Restore Prometheus logger !36 ## v0.9.7 - Disable warning if prometheus_multiproc_dir is not set !35 ## v0.9.6 - Add missing `pid=` label for metrics without labels !31 ## v0.9.5 - Set multiprocess_files_dir config to temp directory by default https://gitlab.com/gitlab-org/prometheus-client-mmap/merge_requests/28 prometheus-client-mmap-v0.23.1/CONTRIBUTING.md000066400000000000000000000010451442423076500206220ustar00rootroot00000000000000# Contributing Prometheus uses GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) one or more of the maintainers (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration. prometheus-client-mmap-v0.23.1/Gemfile000066400000000000000000000007361442423076500176720ustar00rootroot00000000000000source 'https://rubygems.org' gemspec def ruby_version?(constraint) Gem::Dependency.new('', constraint).match?('', RUBY_VERSION) end group :test do gem 'oj', '> 3' gem 'json', '< 2.0' if ruby_version?('< 2.0') gem 'simplecov' gem 'rack', '< 2.0' if ruby_version?('< 2.2.2') gem 'rack-test' gem 'rake' gem 'pry' gem 'rb_sys', '~> 0.9' gem 'rspec' gem 'rubocop', ruby_version?('< 2.0') ? '< 0.42' : nil gem 'tins', '< 1.7' if ruby_version?('< 2.0') end prometheus-client-mmap-v0.23.1/LICENSE000066400000000000000000000261351442423076500174050ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prometheus-client-mmap-v0.23.1/NOTICE000066400000000000000000000002731442423076500172770ustar00rootroot00000000000000Prometheus instrumentation library for Ruby applications Copyright 2013-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). prometheus-client-mmap-v0.23.1/README.md000066400000000000000000000212401442423076500176470ustar00rootroot00000000000000# Prometheus Ruby Mmap Client This Prometheus library is fork of [Prometheus Ruby Client](https://github.com/prometheus/client_ruby) that uses mmap'ed files to share metrics from multiple processes. This allows efficient metrics processing for Ruby web apps running in multiprocess setups like Unicorn. A suite of instrumentation metric primitives for Ruby that can be exposed through a HTTP interface. Intended to be used together with a [Prometheus server][1]. 
[![Gem Version][4]](http://badge.fury.io/rb/prometheus-client-mmap) [![Build Status][3]](https://gitlab.com/gitlab-org/prometheus-client-mmap/commits/master) [![Dependency Status][5]](https://gemnasium.com/prometheus/prometheus-client-mmap) ## Usage ### Overview ```ruby require 'prometheus/client' # returns a default registry prometheus = Prometheus::Client.registry # create a new counter metric http_requests = Prometheus::Client::Counter.new(:http_requests, 'A counter of HTTP requests made') # register the metric prometheus.register(http_requests) # equivalent helper function http_requests = prometheus.counter(:http_requests, 'A counter of HTTP requests made') # start using the counter http_requests.increment ``` ## Rust extension (experimental) In an effort to improve maintainability, there is now an optional Rust implementation that reads the metric files and outputs the multiprocess metrics to text. If `rustc` is available, then the Rust extension will be built automatically. The `use_rust` keyword argument can be used: ```ruby puts Prometheus::Client::Formats::Text.marshal_multiprocess(use_rust: true) ``` Note that this parameter will likely be deprecated and removed once the Rust extension becomes the default mode. ### Rack middleware There are two [Rack][2] middlewares available, one to expose a metrics HTTP endpoint to be scraped by a prometheus server ([Exporter][9]) and one to trace all HTTP requests ([Collector][10]). It's highly recommended to enable gzip compression for the metrics endpoint, for example by including the `Rack::Deflater` middleware. ```ruby # config.ru require 'rack' require 'prometheus/client/rack/collector' require 'prometheus/client/rack/exporter' use Rack::Deflater, if: ->(env, status, headers, body) { body.any? && body[0].length > 512 } use Prometheus::Client::Rack::Collector use Prometheus::Client::Rack::Exporter run ->(env) { [200, {'Content-Type' => 'text/html'}, ['OK']] } ``` Start the server and have a look at the metrics endpoint: [http://localhost:5000/metrics](http://localhost:5000/metrics). For further instructions and other scripts to get started, have a look at the integrated [example application](examples/rack/README.md). ### Pushgateway The Ruby client can also be used to push its collected metrics to a [Pushgateway][8]. This comes in handy with batch jobs or in other scenarios where it's not possible or feasible to let a Prometheus server scrape a Ruby process. TLS and HTTP basic authentication are supported. ```ruby require 'prometheus/client' require 'prometheus/client/push' registry = Prometheus::Client.registry # ... register some metrics, set/increment/observe/etc. their values # push the registry state to the default gateway Prometheus::Client::Push.new(job: 'my-batch-job').add(registry) # optional: specify a grouping key that uniquely identifies a job instance, and gateway. # # Note: the labels you use in the grouping key must not conflict with labels set on the # metrics being pushed. If they do, an error will be raised. Prometheus::Client::Push.new( job: 'my-batch-job', gateway: 'https://example.domain:1234', grouping_key: { instance: 'some-instance', extra_key: 'foobar' } ).add(registry) # If you want to replace any previously pushed metrics for a given grouping key, # use the #replace method. # # Unlike #add, this will completely replace the metrics under the specified grouping key # (i.e. 
# anything currently present in the pushgateway for the specified grouping key, but
# not present in the registry for that grouping key will be removed).
#
# See https://github.com/prometheus/pushgateway#put-method for a full explanation.
Prometheus::Client::Push.new(job: 'my-batch-job').replace(registry)

# If you want to delete all previously pushed metrics for a given grouping key,
# use the #delete method.
Prometheus::Client::Push.new(job: 'my-batch-job').delete
```

## Metrics

The following metric types are currently supported.

### Counter

Counter is a metric that exposes merely a sum or tally of things.

```ruby
counter = Prometheus::Client::Counter.new(:service_requests_total, '...')

# increment the counter for a given label set
counter.increment({ service: 'foo' })

# increment by a given value
counter.increment({ service: 'bar' }, 5)

# get current value for a given label set
counter.get({ service: 'bar' })
# => 5
```

### Gauge

Gauge is a metric that exposes merely an instantaneous value or some snapshot thereof.

```ruby
gauge = Prometheus::Client::Gauge.new(:room_temperature_celsius, '...')

# set a value
gauge.set({ room: 'kitchen' }, 21.534)

# retrieve the current value for a given label set
gauge.get({ room: 'kitchen' })
# => 21.534
```

### Histogram

A histogram samples observations (usually things like request durations or
response sizes) and counts them in configurable buckets. It also provides a
sum of all observed values.

```ruby
histogram = Prometheus::Client::Histogram.new(:service_latency_seconds, '...')

# record a value
histogram.observe({ service: 'users' }, Benchmark.realtime { service.call(arg) })

# retrieve the current bucket values
histogram.get({ service: 'users' })
# => { 0.005 => 3, 0.01 => 15, 0.025 => 18, ..., 2.5 => 42, 5 => 42, 10 => 42 }
```

### Summary

Summary, similar to histograms, is an accumulator for samples. It captures
Numeric data and provides an efficient percentile calculation mechanism.

```ruby
summary = Prometheus::Client::Summary.new(:service_latency_seconds, '...')

# record a value
summary.observe({ service: 'database' }, Benchmark.realtime { service.call() })

# retrieve the current quantile values
summary.get({ service: 'database' })
# => { 0.5 => 0.1233122, 0.9 => 3.4323, 0.99 => 5.3428231 }
```

## Configuration

### Memory mapped files storage location

Set the `prometheus_multiproc_dir` environment variable to the path where you
want metric files to be stored. Example:

```
prometheus_multiproc_dir=/tmp
```

## Pitfalls

### PID cardinality

In a multiprocess setup, e.g. running under Unicorn, frequent worker process
restarts can lead to performance problems when processing metric files. By
default, each process using Prometheus metrics will create a set of files
based on that process's PID. With high worker churn, this will lead to the
creation of thousands of files and, in turn, cause a very noticeable slowdown
when displaying metrics.

To reduce this problem, a surrogate process ID can be used. The set of all
such IDs needs to have low cardinality, and each process ID must be unique
among all running processes.

For Unicorn, a worker ID/number can be used to greatly speed up metrics
rendering. To use it, add this line to your `configure` block:

```ruby
config.pid_provider = Prometheus::Client::Support::Unicorn.method(:worker_pid_provider)
```

## Tools

### `bin/parse`

This command can be used to parse metric files located on the filesystem just
like a metric exporter would. It outputs either `json` formatted raw data or
digested data in prometheus `text` format.
#### Usage: ```bash $ ./bin/parse -h Usage: parse [options] files... -t, --to-prometheus-text format output using Prometheus text formatter -p, --profile enable profiling -h, --help Show this message ``` ## Development After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. To install this gem onto your local machine, run `bundle exec rake install`. ### Releasing a new version To release a new version: 1. Update `lib/prometheus/client/version.rb` with the version number. 1. Update `CHANGELOG.md` with the changes in the release. 1. Create a merge request and merge it to `master`. 1. Push a new tag to the repository. The new version with precompiled, native gems will automatically be published to [RubyGems](https://rubygems.org/gems/prometheus-client-mmap) when the pipeline for the tag completes. [1]: https://github.com/prometheus/prometheus [2]: http://rack.github.io/ [3]: https://gitlab.com/gitlab-org/prometheus-client-mmap/badges/master/pipeline.svg [4]: https://badge.fury.io/rb/prometheus-client.svg [8]: https://github.com/prometheus/pushgateway [9]: lib/prometheus/client/rack/exporter.rb [10]: lib/prometheus/client/rack/collector.rb prometheus-client-mmap-v0.23.1/Rakefile000066400000000000000000000032411442423076500200360ustar00rootroot00000000000000require 'bundler' require 'rspec/core/rake_task' require 'rubocop/rake_task' require 'rake/extensiontask' require 'gem_publisher' require 'rb_sys' cross_rubies = %w[3.2.0 3.1.0 3.0.0 2.7.0] cross_platforms = %w[ aarch64-linux arm64-darwin x86_64-darwin x86_64-linux ] desc 'Default: run specs' task default: [:spec] # test alias task test: :spec desc 'Run specs' RSpec::Core::RakeTask.new do |t| t.rspec_opts = '--require ./spec/spec_helper.rb' end desc 'Lint code' RuboCop::RakeTask.new Bundler::GemHelper.install_tasks desc 'Publish gem to RubyGems.org' task :publish_gem do |_t| gem = GemPublisher.publish_if_updated('prometheus-client-mmap.gemspec', :rubygems) puts "Published #{gem}" if gem end task :console do exec 'irb -r prometheus -I ./lib' end gemspec = Gem::Specification.load(File.expand_path('../prometheus-client-mmap.gemspec', __FILE__)) Gem::PackageTask.new(gemspec) Rake::ExtensionTask.new('fast_mmaped_file', gemspec) do |ext| ext.cross_compile = true ext.cross_platform = cross_platforms end Rake::ExtensionTask.new('fast_mmaped_file_rs', gemspec) do |ext| ext.source_pattern = "*.{rs,toml}" ext.cross_compile = true ext.cross_platform = cross_platforms end namespace "gem" do task "prepare" do sh "bundle" end cross_platforms.each do |plat| desc "Build the native gem for #{plat}" task plat => "prepare" do require "rake_compiler_dock" ENV["RCD_IMAGE"] = "rbsys/#{plat}:#{RbSys::VERSION}" RakeCompilerDock.sh <<~SH, platform: plat bundle && \ RUBY_CC_VERSION="#{cross_rubies.join(":")}" \ rake native:#{plat} pkg/#{gemspec.full_name}-#{plat}.gem SH end end end prometheus-client-mmap-v0.23.1/bin/000077500000000000000000000000001442423076500171415ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/bin/console000077500000000000000000000001331442423076500205260ustar00rootroot00000000000000#!/usr/bin/env ruby require 'bundler/setup' require 'prometheus' require 'pry' Pry.start prometheus-client-mmap-v0.23.1/bin/parse000077500000000000000000000046631442423076500202120ustar00rootroot00000000000000#!/usr/bin/env ruby $LOAD_PATH << File.expand_path('../../lib', __FILE__) require 'prometheus/client' 
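# Helpers for reading raw mmap'd metric files and rendering them as text or JSON.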
require 'prometheus/client/helper/plain_file' require 'prometheus/client/helper/metrics_processing' require 'prometheus/client/helper/metrics_representation' require 'json' require 'optparse' require 'fast_mmaped_file' options = {} OptionParser.new do |opts| opts.banner = 'Usage: parse [options] files...' opts.on('-t', '--to-prometheus-text', 'format output using Prometheus text formatter') do |v| options[:prom_text] = v end opts.on('-s', '--to-prometheus-text-slow', 'format output using Prometheus Ruby based text formatter') do |v| options[:prom_text_slow] = v end opts.on('-p', '--profile', 'enable profiling') do |v| options[:profile] = v end opts.on_tail('-h', '--help', 'Show this message') do puts opts exit end end.parse! class Object def transform(&block) yield self end end # Output closely resembling binary file contents # best used with `jq` def to_json(files) files.map { |f| Prometheus::Client::Helper::PlainFile.new(f) } .map { |f| { filepath: f.filepath, entries: entries_to_json(f.parsed_entries(true)) } } .transform { |s| JSON.dump(s); nil } end def to_prom_text(files) files.map {|f| Prometheus::Client::Helper::PlainFile.new(f) } .map { |f| [f.filepath, f.multiprocess_mode.to_sym, f.type.to_sym, f.pid] } .transform { |files| FastMmapedFile.to_metrics(files.to_a) } end def to_prom_text_slow(files) files.map { |f| Prometheus::Client::Helper::PlainFile.new(f) } .each_with_object({}) { |f, metrics| f.to_metrics(metrics, true) } .transform(&Prometheus::Client::Helper::MetricsProcessing.method(:merge_metrics)) .transform(&Prometheus::Client::Helper::MetricsRepresentation.method(:to_text)) end def entries_to_json(entries) entries.map { |e, v| entry_to_json(*JSON.load(e)).merge(value: v) } end def entry_to_json(metric=nil, name=nil, labels=[], label_values=[]) { metric: metric, name: name, labels: labels.zip(label_values).to_h } end def run(profile = false) if profile require 'ruby-prof' RubyProf.start yield result = RubyProf.stop printer = RubyProf::FlatPrinter.new(result) printer.print(STDERR) else yield end end run(options[:profile]) do if options[:prom_text] puts to_prom_text(ARGV) elsif options[:prom_text_slow] puts to_prom_text_slow(ARGV) else puts to_json(ARGV) end end prometheus-client-mmap-v0.23.1/bin/setup000077500000000000000000000022261442423076500202310ustar00rootroot00000000000000#!/usr/bin/env bash set -euo pipefail IFS=$'\n\t' set -vx bundle install bundle exec rake compile if cargo nextest --version > /dev/null 2>&1; then exit; fi # Check if rust is managed by 'asdf' if command -v cargo | grep '.asdf/shims'; then # This will fail if no rust version has been specified in asdf rust_path="$(asdf where rust)/bin" # Check for $CARGO_HOME that may not be in $HOME # We use '/dev/null' as a fallback value known to be present and not a directory elif [ -d "${CARGO_HOME:-/dev/null}/bin" ]; then rust_path="${CARGO_HOME}/bin" # Default path for rustup.rs elif [ -d "${HOME}/.cargo/bin" ]; then rust_path="${HOME}/.cargo/bin" else echo "No rust toolchain found, skipping installation of 'cargo nextest'" exit fi if [ "$(uname -s)" = 'Darwin' ]; then host_os='mac' elif [ "$(uname -s)" = 'Linux' ] && [ "$(uname -m)" = 'x86_64' ]; then host_os='linux' else echo "Auto-install for 'cargo nextest' only available on MacOS and x86_64 Linux. Download manually from https://nexte.st/" exit fi echo "Installing 'cargo nextest'..." 
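# Download the prebuilt cargo-nextest archive for this platform and unpack it into the toolchain's bin directory.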
curl -LsSf "https://get.nexte.st/latest/${host_os}" | tar zxf - -C "${rust_path}" prometheus-client-mmap-v0.23.1/example.rb000066400000000000000000000016501442423076500203530ustar00rootroot00000000000000$LOAD_PATH.unshift("./lib") require 'prometheus/client' require 'prometheus/client/formats/text.rb' require 'pp' prometheus = Prometheus::Client.registry counter = Prometheus::Client::Counter.new(:mycounter, 'Example counter') gauge = Prometheus::Client::Gauge.new(:mygauge, 'Example gauge', {}, :livesum) histogram = Prometheus::Client::Histogram.new(:myhistogram, 'Example histogram', {}, [0, 1, 2]) prometheus.register(counter) prometheus.register(gauge) prometheus.register(histogram) counter.increment({'foo': 'bar'}, 2) counter.increment({'foo': 'biz'}, 4) gauge.set({'foo': 'bar'}, 3) gauge.set({'foo': 'biz'}, 3) gauge.decrement({'foo': 'bar'}, 1) histogram.observe({'foo': 'bar'}, 0.5) histogram.observe({'foo': 'biz'}, 0.5) histogram.observe({'foo': 'bar'}, 1.5) histogram.observe({'foo': 'biz'}, 2) #puts Prometheus::Client::Formats::Text.marshal(prometheus) puts Prometheus::Client::Formats::Text.marshal_multiprocess prometheus-client-mmap-v0.23.1/examples/000077500000000000000000000000001442423076500202075ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/examples/rack/000077500000000000000000000000001442423076500211275ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/examples/rack/.gitignore000066400000000000000000000000231442423076500231120ustar00rootroot00000000000000data/ Gemfile.lock prometheus-client-mmap-v0.23.1/examples/rack/Gemfile000066400000000000000000000001371442423076500224230ustar00rootroot00000000000000source 'https://rubygems.org' gem 'prometheus-client', path: '../..' gem 'rack' gem 'unicorn' prometheus-client-mmap-v0.23.1/examples/rack/README.md000066400000000000000000000027411442423076500224120ustar00rootroot00000000000000# Rack example A simple Rack application which shows how to use prometheus' `Rack::Exporter` and `Rack::Collector` rack middlwares. ## Run the example ### Standalone Execute the provided `run` script: ```bash bundle install bundle exec ./run ``` This will start the rack app, run a few requests against it, print the output of `/metrics` and terminate. ### With a Prometheus server Start a Prometheus server with the provided config: ```bash prometheus -config.file ./prometheus.yaml ``` In another terminal, start the application server: ```bash bundle install bundle exec unicorn -c ./unicorn.conf ``` You can now open the [example app](http://localhost:5000/) and its [metrics page](http://localhost:5000/metrics) to inspect the output. The running Prometheus server can be used to [play around with the metrics][rate-query]. [rate-query]: http://localhost:9090/graph#%5B%7B%22range_input%22%3A%221h%22%2C%22expr%22%3A%22rate(http_request_duration_seconds_count%5B1m%5D)%22%2C%22tab%22%3A0%7D%5D ## Collector The example shown in [`config.ru`](config.ru) is a trivial rack application using the default collector and exporter middlewares. 
In order to use a custom label builder in the collector, change the line to something like this: ```ruby use Prometheus::Client::Rack::Collector do |env| { method: env['REQUEST_METHOD'].downcase, host: env['HTTP_HOST'].to_s, path: env['PATH_INFO'].to_s, http_version: env['HTTP_VERSION'].to_s, } end ``` prometheus-client-mmap-v0.23.1/examples/rack/config.ru000077500000000000000000000005041442423076500227460ustar00rootroot00000000000000require 'rack' require 'prometheus/client/rack/collector' require 'prometheus/client/rack/exporter' use Rack::Deflater, if: ->(_, _, _, body) { body.any? && body[0].length > 512 } use Prometheus::Client::Rack::Collector use Prometheus::Client::Rack::Exporter run ->(_) { [200, { 'Content-Type' => 'text/html' }, ['OK']] } prometheus-client-mmap-v0.23.1/examples/rack/prometheus.yml000066400000000000000000000003531442423076500240460ustar00rootroot00000000000000global: scrape_interval: "15s" scrape_configs: - job_name: "prometheus" static_configs: - targets: - "localhost:9090" - job_name: "rack-example" static_configs: - targets: - "localhost:5000" prometheus-client-mmap-v0.23.1/examples/rack/run000077500000000000000000000013221442423076500216570ustar00rootroot00000000000000#!/bin/bash -e trap 'kill $(jobs -p)' EXIT installed() { which $1 > /dev/null } log() { echo >&2 $1 } fatal() { log $1 exit 1 } if ! installed vegeta; then if ! installed go; then fatal "Could not find go. Either run the examples manually or install" fi go get github.com/tsenart/vegeta go install github.com/tsenart/vegeta fi PORT=5000 URL=http://127.0.0.1:${PORT}/ log "starting example server" bundle install --quiet bundle exec unicorn -p ${PORT} -c unicorn.conf &>> /dev/null & # wait until unicorn is available sleep 1 log "sending requests for 5 seconds" printf "GET ${URL}\nPOST ${URL}\nDELETE ${URL}" | vegeta attack -duration 5s &>> /dev/null log "printing /metrics" curl -s "${URL}metrics" prometheus-client-mmap-v0.23.1/examples/rack/unicorn.conf000066400000000000000000000000601442423076500234470ustar00rootroot00000000000000listen 5000 worker_processes 1 preload_app true prometheus-client-mmap-v0.23.1/ext/000077500000000000000000000000001442423076500171715ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/000077500000000000000000000000001442423076500224505ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/extconf.rb000066400000000000000000000014171442423076500244460ustar00rootroot00000000000000require 'mkmf' require 'fileutils' $CFLAGS << ' -std=c99 -D_POSIX_C_SOURCE=200809L -Wall -Wextra' if enable_config('fail-on-warning') $CFLAGS << ' -Werror' end if enable_config('debug') $CFLAGS << ' -O0 -g' end if enable_config('address-sanitizer') $CFLAGS << ' -O -fsanitize=address -fno-omit-frame-pointer -g' end CONFIG['warnflags'].slice!(/ -Wdeclaration-after-statement/) cwd = File.expand_path(File.dirname(__FILE__)) vendor_dir = File.join(cwd, '../../vendor/c') src_dir = File.join(cwd, '../../ext/fast_mmaped_file') src_files = %W[#{vendor_dir}/jsmn/jsmn.c #{vendor_dir}/hashmap/src/hashmap.c] FileUtils.cp(src_files, src_dir) $INCFLAGS << " -I#{vendor_dir}/jsmn -I#{vendor_dir}/hashmap/src" dir_config('fast_mmaped_file') create_makefile('fast_mmaped_file') prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/fast_mmaped_file.c000066400000000000000000000064611442423076500261020ustar00rootroot00000000000000#include #include #include #include #include #include #include "file_parsing.h" #include "file_reading.h" #include "globals.h" #include 
"mmap.h" #include "rendering.h" #include "utils.h" #include "value_access.h" VALUE MMAPED_FILE = Qnil; ID sym_min; ID sym_max; ID sym_livesum; ID sym_gauge; ID sym_pid; ID sym_samples; VALUE prom_eParsingError; int aggregate_files(struct hashmap *map, VALUE list_of_files) { buffer_t reading_buffer; memset(&reading_buffer, 0, sizeof(buffer_t)); for (int i = 0; i < RARRAY_LEN(list_of_files); i++) { VALUE params = RARRAY_PTR(list_of_files)[i]; file_t file; if (!file_open_from_params(&file, params)) { buffer_dispose(&reading_buffer); return 0; } if (!read_from_file(&file, &reading_buffer)) { buffer_dispose(&reading_buffer); file_close(&file); return 0; } if (!process_buffer(&file, &reading_buffer, map)) { buffer_dispose(&reading_buffer); file_close(&file); return 0; } if (!file_close(&file)) { buffer_dispose(&reading_buffer); return 0; } } buffer_dispose(&reading_buffer); return 1; } VALUE method_to_metrics(VALUE UNUSED(self), VALUE file_list) { struct hashmap map; hashmap_setup(&map); if (!aggregate_files(&map, file_list)) { // all entries in map are now copies that need to be disposed hashmap_destroy(&map); raise_last_exception(); return Qnil; } entry_t **sorted_entries; if (!sort_map_entries(&map, &sorted_entries)) { hashmap_destroy(&map); raise_last_exception(); return Qnil; } VALUE rv = rb_str_new("", 0); if (!entries_to_string(rv, sorted_entries, hashmap_size(&map))) { free(sorted_entries); hashmap_destroy(&map); raise_last_exception(); return Qnil; } RB_GC_GUARD(file_list); // ensure file list is not GCed before this point free(sorted_entries); hashmap_destroy(&map); return rv; } void Init_fast_mmaped_file() { sym_gauge = rb_intern("gauge"); sym_min = rb_intern("min"); sym_max = rb_intern("max"); sym_livesum = rb_intern("livesum"); sym_pid = rb_intern("pid"); sym_samples = rb_intern("samples"); prom_eParsingError = rb_define_class("PrometheusParsingError", rb_eRuntimeError); MMAPED_FILE = rb_define_class("FastMmapedFile", rb_cObject); rb_define_const(MMAPED_FILE, "MAP_SHARED", INT2FIX(MAP_SHARED)); rb_define_singleton_method(MMAPED_FILE, "to_metrics", method_to_metrics, 1); rb_define_alloc_func(MMAPED_FILE, mm_s_alloc); rb_define_singleton_method(MMAPED_FILE, "new", mm_s_new, -1); rb_define_method(MMAPED_FILE, "initialize", mm_init, 1); rb_define_method(MMAPED_FILE, "slice", mm_aref_m, -1); rb_define_method(MMAPED_FILE, "sync", mm_msync, -1); rb_define_method(MMAPED_FILE, "munmap", mm_unmap, 0); rb_define_method(MMAPED_FILE, "used", method_load_used, 0); rb_define_method(MMAPED_FILE, "used=", method_save_used, 1); rb_define_method(MMAPED_FILE, "fetch_entry", method_fetch_entry, 3); rb_define_method(MMAPED_FILE, "upsert_entry", method_upsert_entry, 3); } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/file_format.c000066400000000000000000000002431442423076500251020ustar00rootroot00000000000000#include "file_format.h" inline uint32_t padding_length(uint32_t key_length) { return 8 - (sizeof(uint32_t) + key_length) % 8; // padding | 8 byte aligned } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/file_format.h000066400000000000000000000002741442423076500251130ustar00rootroot00000000000000#ifndef FILE_FORMAT_H #define FILE_FORMAT_H #include #define START_POSITION 8 #define INITIAL_SIZE (2 * sizeof(int32_t)) uint32_t padding_length(uint32_t key_length); #endifprometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/file_parsing.c000066400000000000000000000134731442423076500252660ustar00rootroot00000000000000#include "file_parsing.h" #include #include #include "file_format.h" #include 
"globals.h" #include "utils.h" HASHMAP_FUNCS_CREATE(entry, const entry_t, entry_t) typedef int (*compare_fn)(const void *a, const void *b); static size_t hashmap_hash_entry(const entry_t *entry) { return hashmap_hash_string(entry->json); } static int hashmap_compare_entry(const entry_t *a, const entry_t *b) { if (a->json_size != b->json_size) { return -1; } if (is_pid_significant(a) && (rb_str_equal(a->pid, b->pid) == Qfalse)) { return -1; } return strncmp(a->json, b->json, a->json_size); } static void entry_free(entry_t *entry) { free(entry->json); free(entry); } static inline double min(double a, double b) { return a < b ? a : b; } static inline double max(double a, double b) { return a > b ? a : b; } static void merge_entry(entry_t *found, const entry_t *entry) { if (entry->type == sym_gauge) { if (entry->multiprocess_mode == sym_min) { found->value = min(found->value, entry->value); } else if (entry->multiprocess_mode == sym_max) { found->value = max(found->value, entry->value); } else if (entry->multiprocess_mode == sym_livesum) { found->value += entry->value; } else { found->value = entry->value; } } else { found->value += entry->value; } } void merge_or_store(struct hashmap *map, entry_t *entry) { entry_t *found = entry_hashmap_get(map, entry); if (found) { merge_entry(found, entry); entry_free(entry); } else { entry_hashmap_put(map, entry, entry); // use the hashmap like hashset actually } } entry_t *entry_new(buffer_t *source, uint32_t pos, uint32_t encoded_len, file_t *file_info) { entry_t *entry = calloc(1, sizeof(entry_t)); if (entry == NULL) { return NULL; } entry->json = malloc(encoded_len + 1); if (entry->json == NULL) { free(entry); return NULL; } memcpy(entry->json, source->buffer + pos, encoded_len); entry->json[encoded_len] = '\0'; entry->json_size = encoded_len; entry->pid = file_info->pid; entry->multiprocess_mode = file_info->multiprocess_mode; entry->type = file_info->type; char *value_ptr = source->buffer + pos + encoded_len + padding_length(encoded_len); memcpy(&(entry->value), value_ptr, sizeof(double)); return entry; } static int add_parsed_name(entry_t *entry) { jsmn_parser parser; jsmn_init(&parser); jsmntok_t tokens[2]; memset(&tokens, 0, sizeof(tokens)); jsmn_parse(&parser, entry->json, entry->json_size, tokens, 2); jsmntok_t *name_token = &tokens[1]; if (name_token->start < name_token->end && name_token->start > 0) { entry->name = entry->json + name_token->start; entry->name_len = name_token->end - name_token->start; return 1; } return 0; } static int entry_lexical_comparator(const entry_t **a, const entry_t **b) { size_t size_a = (*a)->json_size; size_t size_b = (*b)->json_size; size_t min_length = size_a < size_b ? 
size_a : size_b; return strncmp((*a)->json, (*b)->json, min_length); } void hashmap_setup(struct hashmap *map) { hashmap_init(map, (size_t(*)(const void *))hashmap_hash_entry, (int (*)(const void *, const void *))hashmap_compare_entry, 1000); hashmap_set_key_alloc_funcs(map, NULL, (void (*)(void *))entry_free); } int process_buffer(file_t *file_info, buffer_t *source, struct hashmap *map) { if (source->size < START_POSITION) { // nothing to read return 1; } uint32_t used; memcpy(&used, source->buffer, sizeof(uint32_t)); if (used > source->size) { save_exception(prom_eParsingError, "source file %s corrupted, used %u > file size %u", file_info->path, used, source->size); return 0; } uint32_t pos = START_POSITION; while (pos + sizeof(uint32_t) < used) { uint32_t encoded_len; memcpy(&encoded_len, source->buffer + pos, sizeof(uint32_t)); pos += sizeof(uint32_t); uint32_t value_offset = encoded_len + padding_length(encoded_len); if (pos + value_offset + sizeof(double) > used) { save_exception(prom_eParsingError, "source file %s corrupted, used %u < stored data length %u", file_info->path, used, pos + value_offset + sizeof(double)); return 0; } entry_t *entry = entry_new(source, pos, encoded_len, file_info); if (entry == NULL) { save_exception(rb_eNoMemError, "Failed creating metrics entry"); return 0; } merge_or_store(map, entry); pos += value_offset + sizeof(double); } return 1; } int sort_map_entries(const struct hashmap *map, entry_t ***sorted_entries) { size_t num = hashmap_size(map); entry_t **list = calloc(num, sizeof(entry_t *)); if (list == NULL) { save_exception(rb_eNoMemError, "Couldn't allocate for %zu memory", num * sizeof(entry_t *)); return 0; } size_t cnt = 0; struct hashmap_iter *iter; for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) { entry_t *entry = (entry_t *)entry_hashmap_iter_get_key(iter); if (add_parsed_name(entry)) { list[cnt] = entry; cnt++; } } if (cnt != num) { save_exception(rb_eRuntimeError, "Processed entries %zu != map entries %zu", cnt, num); free(list); return 0; } qsort(list, cnt, sizeof(entry_t *), (compare_fn)&entry_lexical_comparator); *sorted_entries = list; return 1; } int is_pid_significant(const entry_t *e) { ID mp = e->multiprocess_mode; return e->type == sym_gauge && !(mp == sym_min || mp == sym_max || mp == sym_livesum); } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/file_parsing.h000066400000000000000000000010301442423076500252550ustar00rootroot00000000000000#ifndef FILE_PARSING_H #define FILE_PARSING_H #include #include #include typedef struct { char *json; size_t json_size; char *name; size_t name_len; ID multiprocess_mode; ID type; VALUE pid; double value; } entry_t; void hashmap_setup(struct hashmap *map); int process_buffer(file_t *file_info, buffer_t *source, struct hashmap *map); int sort_map_entries(const struct hashmap *map, entry_t ***sorted_entries); int is_pid_significant(const entry_t *e); #endifprometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/file_reading.c000066400000000000000000000053761442423076500252370ustar00rootroot00000000000000#include "file_reading.h" #include #include #include "utils.h" static int file_open(file_t *source, const char *filepath) { source->file = fopen(filepath, "r"); size_t filepath_len = strlen(filepath) + sizeof(char); source->path = malloc(filepath_len); memcpy(source->path, filepath, filepath_len); if (source->file == NULL) { save_exception(rb_eArgError, "Can't open %s, errno: %d", filepath, errno); return 0; } struct stat sb; if (fstat(fileno(source->file), &sb) != 0) { 
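        /* fstat failed: close the handle and record an IOError to be raised back in Ruby. */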
        fclose(source->file);
        save_exception(rb_eIOError, "Can't stat file, errno: %d", errno);
        return 0;
    }
    source->length = sb.st_size;

    // go to start
    if (fseek(source->file, 0L, SEEK_SET) != 0) {
        fclose(source->file);
        save_exception(rb_eIOError, "Can't fseek to start, errno: %d", errno);
        return 0;
    }

    return 1;
}

int file_close(file_t *source) {
    free(source->path);

    if (fclose(source->file) != 0) {
        save_exception(rb_eIOError, "Can't fclose file, errno: %d", errno);
        return 0;
    }
    source->file = 0;

    return 1;
}

int file_open_from_params(file_t *source, VALUE params) {
    if (RARRAY_LEN(params) != 4) {
        save_exception(rb_eArgError, "wrong number of arguments %lu instead of 4", RARRAY_LEN(params));
        return 0;
    }

    VALUE filepath = rb_ary_entry(params, 0);
    source->multiprocess_mode = rb_sym2id(rb_ary_entry(params, 1));
    source->type = rb_sym2id(rb_ary_entry(params, 2));
    source->pid = rb_ary_entry(params, 3);

    return file_open(source, StringValueCStr(filepath));
}

int read_from_file(const file_t *source, buffer_t *data) {
    data->size = 0;

    if (data->buffer == NULL) {
        data->buffer = malloc(source->length);
        if (data->buffer == NULL) {
            save_exception(rb_eIOError, "Can't malloc %zu, errno: %d", source->length, errno);
            return 0;
        }
        data->capacity = source->length;
    } else if (data->capacity < source->length) {
        data->buffer = realloc(data->buffer, source->length);
        if (data->buffer == NULL) {
            save_exception(rb_eIOError, "Can't realloc %zu, errno: %d", source->length, errno);
            return 0;
        }
        data->capacity = source->length;
    }

    data->size = fread(data->buffer, sizeof(char), source->length, source->file);
    if (data->size != source->length) {
        save_exception(rb_eIOError, "Couldn't read whole file, read %zu, instead of %zu", data->size, source->length);
        return 0;
    }

    return 1;
}

void buffer_dispose(buffer_t *buffer) {
    if (buffer->buffer) {
        free(buffer->buffer);
    }
    buffer->buffer = NULL;
    buffer->size = 0;
    buffer->capacity = 0;
}
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/file_reading.h
#ifndef FILE_READING_H
#define FILE_READING_H

#include <ruby.h>

typedef struct {
    FILE *file;
    size_t length;
    char *path;

    // Information processed from file path
    ID multiprocess_mode;
    ID type;
    VALUE pid;
} file_t;

typedef struct {
    char *buffer;
    size_t size;
    size_t capacity;
} buffer_t;

int file_close(file_t *file);
int file_open_from_params(file_t *file, VALUE params);
int read_from_file(const file_t *source, buffer_t *data);
void buffer_dispose(buffer_t *buffer);

#endif
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/globals.h
#ifndef GLOBALS_H
#define GLOBALS_H

#include <ruby.h>

extern ID sym_min;
extern ID sym_max;
extern ID sym_livesum;
extern ID sym_gauge;
extern ID sym_pid;
extern ID sym_samples;

extern VALUE prom_eParsingError;

#endif
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/mmap.c
#include "mmap.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "file_format.h"
#include "utils.h"

#if 0
#include <stdio.h>
#define DEBUGF(format, ...) printf("%d: " format "\n", __LINE__, __VA_ARGS__)
#else
#define DEBUGF(format, ...)
#endif

/* This is the ID of the WeakMap used to track strings allocated that
 * are backed by a memory-mapped file. */
#define WEAK_OBJ_TRACKER "@weak_obj_tracker"

/**
 * Maps a given VALUE to some key for the WeakMap.
For now, we just use * the integer value as the key since that suffices, though this does * require Ruby 2.7 due to https://bugs.ruby-lang.org/issues/16035. */ static VALUE weak_obj_tracker_get_key(VALUE val) { return val; } /** * Adds a T_STRING type to the WeakMap. The WeakMap should be stored * as an instance variable. */ static void weak_obj_tracker_add(VALUE obj, VALUE val) { Check_Type(val, T_STRING); VALUE tracker = rb_iv_get(obj, WEAK_OBJ_TRACKER); VALUE key = weak_obj_tracker_get_key(val); rb_funcall(tracker, rb_intern("[]="), 2, key, val); } /** * Iterator function for updating a single element from the WeakMap. */ VALUE mm_update_obj_i(RB_BLOCK_CALL_FUNC_ARGLIST(i, self)) { Check_Type(self, T_DATA); Check_Type(i, T_STRING); rb_check_arity(argc, 1, 1); mm_ipc *i_mm; GET_MMAP(self, i_mm, MM_MODIFY); RSTRING(i)->as.heap.ptr = i_mm->t->addr; RSTRING(i)->as.heap.len = i_mm->t->real; return Qtrue; } /** * This iterates through the WeakMap defined on the class and updates * the RStrings to use the newly-allocated memory region. */ void mm_update(VALUE obj) { VALUE tracker = rb_iv_get(obj, WEAK_OBJ_TRACKER); rb_block_call(tracker, rb_intern("each_value"), 0, NULL, mm_update_obj_i, obj); } typedef struct { VALUE obj, *argv; ID id; int flag, argc; } mm_bang; static VALUE mm_protect_bang(VALUE *t) { return rb_funcall2(t[0], (ID)t[1], (int)t[2], (VALUE *)t[3]); } static VALUE mm_recycle(VALUE str) { rb_gc_force_recycle(str); return str; } static VALUE mm_vunlock(VALUE obj) { mm_ipc *i_mm; GET_MMAP(obj, i_mm, 0); return Qnil; } static VALUE mm_str(VALUE obj, int modify) { mm_ipc *i_mm; VALUE ret = Qnil; GET_MMAP(obj, i_mm, modify & ~MM_ORIGIN); if (modify & MM_MODIFY) { if (i_mm->t->flag & MM_FROZEN) rb_error_frozen("mmap"); } ret = rb_obj_alloc(rb_cString); RSTRING(ret)->as.heap.ptr = i_mm->t->addr; RSTRING(ret)->as.heap.aux.capa = i_mm->t->len; RSTRING(ret)->as.heap.len = i_mm->t->real; weak_obj_tracker_add(obj, ret); DEBUGF("RString capa: %d, len: %d", RSTRING(ret)->as.heap.aux.capa, RSTRING(ret)->as.heap.len); if (modify & MM_ORIGIN) { #if HAVE_RB_DEFINE_ALLOC_FUNC RSTRING(ret)->as.heap.aux.shared = obj; FL_SET(ret, RSTRING_NOEMBED); FL_SET(ret, FL_USER18); #else RSTRING(ret)->orig = ret; #endif } if (i_mm->t->flag & MM_FROZEN) { ret = rb_obj_freeze(ret); } return ret; } static VALUE mm_i_bang(bang_st) mm_bang *bang_st; { VALUE str, res; mm_ipc *i_mm; str = mm_str(bang_st->obj, bang_st->flag); if (bang_st->flag & MM_PROTECT) { VALUE tmp[4]; tmp[0] = str; tmp[1] = (VALUE)bang_st->id; tmp[2] = (VALUE)bang_st->argc; tmp[3] = (VALUE)bang_st->argv; res = rb_ensure(mm_protect_bang, (VALUE)tmp, mm_recycle, str); } else { res = rb_funcall2(str, bang_st->id, bang_st->argc, bang_st->argv); RB_GC_GUARD(res); } if (res != Qnil) { GET_MMAP(bang_st->obj, i_mm, 0); i_mm->t->real = RSTRING_LEN(str); } return res; } static VALUE mm_bang_i(VALUE obj, int flag, ID id, int argc, VALUE *argv) { VALUE res; mm_ipc *i_mm; mm_bang bang_st; GET_MMAP(obj, i_mm, 0); if ((flag & MM_CHANGE) && (i_mm->t->flag & MM_FIXED)) { rb_raise(rb_eTypeError, "try to change the size of a fixed map"); } bang_st.obj = obj; bang_st.flag = flag; bang_st.id = id; bang_st.argc = argc; bang_st.argv = argv; if (i_mm->t->flag & MM_IPC) { res = rb_ensure(mm_i_bang, (VALUE)&bang_st, mm_vunlock, obj); } else { res = mm_i_bang(&bang_st); } if (res == Qnil) return res; return (flag & MM_ORIGIN) ? 
res : obj; } static void mm_free(mm_ipc *i_mm) { if (i_mm->t->path) { if (munmap(i_mm->t->addr, i_mm->t->len) != 0) { if (i_mm->t->path != (char *)-1 && i_mm->t->path != NULL) { free(i_mm->t->path); } free(i_mm); rb_raise(rb_eRuntimeError, "munmap failed at %s:%d with errno: %d", __FILE__, __LINE__, errno); } if (i_mm->t->path != (char *)-1) { if (i_mm->t->real < i_mm->t->len && i_mm->t->vscope != MAP_PRIVATE && truncate(i_mm->t->path, i_mm->t->real) == -1) { free(i_mm->t->path); free(i_mm); rb_raise(rb_eTypeError, "truncate"); } free(i_mm->t->path); } } free(i_mm); } /* * call-seq: * new(file) * * create a new Mmap object * * * file * * * Creates a mapping that's shared with all other processes * mapping the same areas of the file. * */ VALUE mm_s_new(int argc, VALUE *argv, VALUE obj) { VALUE res = rb_funcall2(obj, rb_intern("allocate"), 0, 0); rb_obj_call_init(res, argc, argv); return res; } VALUE mm_s_alloc(VALUE obj) { VALUE res; mm_ipc *i_mm; res = Data_Make_Struct(obj, mm_ipc, 0, mm_free, i_mm); i_mm->t = ALLOC_N(mm_mmap, 1); MEMZERO(i_mm->t, mm_mmap, 1); i_mm->t->fd = -1; return res; } size_t next_page_boundary(size_t value) { size_t page_size = sysconf(_SC_PAGESIZE); while (page_size < value) { page_size *= 2; } return page_size; } /* Reference implementations: * mozilla: https://hg.mozilla.org/mozilla-central/file/3d846420a907/xpcom/glue/FileUtils.cpp#l71 * glibc: https://github.com/lattera/glibc/blob/master/sysdeps/posix/posix_fallocate.c */ int reserve_mmap_file_bytes(int fd, size_t size) { #if __linux__ /* From https://stackoverflow.com/a/22820221: The difference with * ftruncate(2) is that (on file systems supporting it, e.g. Ext4) * disk space is indeed reserved by posix_fallocate but ftruncate * extends the file by adding holes (and without reserving disk * space). */ return posix_fallocate(fd, 0, size); #else /* We simplify the reference implemnetations since we generally * don't need to reserve more than a page size. */ return ftruncate(fd, size); #endif } VALUE mm_init(VALUE obj, VALUE fname) { struct stat st; int fd, smode = 0, pmode = 0, vscope, perm, init; MMAP_RETTYPE addr; mm_ipc *i_mm; char *path; size_t size = 0; off_t offset; vscope = 0; path = 0; fd = -1; VALUE klass = rb_eval_string("ObjectSpace::WeakMap"); VALUE weak_obj_tracker = rb_class_new_instance(0, NULL, klass); rb_iv_set(obj, WEAK_OBJ_TRACKER, weak_obj_tracker); fname = rb_str_to_str(fname); SafeStringValue(fname); path = StringValuePtr(fname); vscope = MAP_SHARED; size = 0; perm = 0666; smode = O_RDWR; pmode = PROT_READ | PROT_WRITE; if ((fd = open(path, smode, perm)) == -1) { rb_raise(rb_eArgError, "Can't open %s", path); } if (fstat(fd, &st) == -1) { close(fd); rb_raise(rb_eArgError, "Can't stat %s", path); } size = st.st_size; Data_Get_Struct(obj, mm_ipc, i_mm); offset = 0; init = 0; if (size == 0) { init = 1; size = INITIAL_SIZE; } /* We need to ensure the underlying file descriptor is at least a page size. * Otherwise, we could get a SIGBUS error if mmap() attempts to read or write * past the file. 
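 * To guard against that, the reservation below is first rounded up to the
 * next page boundary (next_page_boundary) before the region is mapped.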
*/ size_t reserve_size = next_page_boundary(size); if (reserve_mmap_file_bytes(fd, reserve_size) != 0) { close(fd); rb_raise(rb_eIOError, "Can't reserve %zu bytes for memory-mapped file in %s", reserve_size, path); } addr = mmap(0, size, pmode, vscope, fd, offset); if (addr == MAP_FAILED || !addr) { close(fd); rb_raise(rb_eArgError, "mmap failed (%d)", errno); } i_mm->t->fd = fd; i_mm->t->addr = addr; i_mm->t->len = size; if (!init) { i_mm->t->real = size; } i_mm->t->pmode = pmode; i_mm->t->vscope = vscope; i_mm->t->smode = smode & ~O_TRUNC; i_mm->t->path = (path) ? ruby_strdup(path) : (char *)-1; if (smode == O_WRONLY) { i_mm->t->flag |= MM_FIXED; } return obj; } /* * Document-method: [] * Document-method: slice * * call-seq: [](args) * * Element reference - with the following syntax: * * self[nth] * * retrieve the nth character * * self[start..last] * * return a substring from start to last * * self[start, length] * * return a substring of lenght characters from start */ VALUE mm_aref_m(int argc, VALUE *argv, VALUE obj) { return mm_bang_i(obj, MM_ORIGIN, rb_intern("[]"), argc, argv); } /* * Document-method: msync * Document-method: sync * Document-method: flush * * call-seq: msync * * flush the file */ VALUE mm_msync(int argc, VALUE *argv, VALUE obj) { mm_ipc *i_mm; GET_MMAP(obj, i_mm, MM_MODIFY); VALUE oflag; int ret; int flag = MS_SYNC; if (argc) { rb_scan_args(argc, argv, "01", &oflag); flag = NUM2INT(oflag); } if ((ret = msync(i_mm->t->addr, i_mm->t->len, flag)) != 0) { rb_raise(rb_eArgError, "msync(%d)", ret); } return obj; } /* * Document-method: munmap * Document-method: unmap * * call-seq: munmap * * terminate the association */ VALUE mm_unmap(VALUE obj) { mm_ipc *i_mm; GET_MMAP(obj, i_mm, 0); if (i_mm->t->path) { if (munmap(i_mm->t->addr, i_mm->t->len) != 0) { if (i_mm->t->path != (char *)-1 && i_mm->t->path != NULL) { free(i_mm->t->path); i_mm->t->path = NULL; } rb_raise(rb_eRuntimeError, "munmap failed at %s:%d with errno: %d", __FILE__, __LINE__, errno); } if (i_mm->t->path != (char *)-1) { if (i_mm->t->real < i_mm->t->len && i_mm->t->vscope != MAP_PRIVATE && truncate(i_mm->t->path, i_mm->t->real) == -1) { rb_raise(rb_eTypeError, "truncate"); } free(i_mm->t->path); } // Ensure any lingering RString values get a length of zero. We // can't zero out the address since GET_MMAP() inside // mm_update_obj_i() expects a non-null address and path. 
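// Note the ordering: the lengths are zeroed and mm_update() runs while addr
// and path are still set; only afterwards are they cleared.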
i_mm->t->len = 0; i_mm->t->real = 0; mm_update(obj); i_mm->t->addr = NULL; i_mm->t->path = NULL; } close(i_mm->t->fd); return Qnil; } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/mmap.h000066400000000000000000000035321442423076500235560ustar00rootroot00000000000000#ifndef MMAP_H #define MMAP_H #include #include #define MM_MODIFY 1 #define MM_ORIGIN 2 #define MM_CHANGE (MM_MODIFY | 4) #define MM_PROTECT 8 #define MM_FROZEN (1 << 0) #define MM_FIXED (1 << 1) #define MM_ANON (1 << 2) #define MM_LOCK (1 << 3) #define MM_IPC (1 << 4) #define MM_TMP (1 << 5) #ifndef MMAP_RETTYPE #define MMAP_RETTYPE void * #endif typedef struct { MMAP_RETTYPE addr; int smode, pmode, vscope; int advice, flag; VALUE key; size_t len, real; off_t offset; int fd; char *path; } mm_mmap; typedef struct { int count; mm_mmap *t; } mm_ipc; #define GET_MMAP(obj, i_mm, t_modify) \ Data_Get_Struct(obj, mm_ipc, i_mm); \ if (!i_mm->t->path || i_mm->t->fd < 0 || i_mm->t->addr == NULL || i_mm->t->addr == MAP_FAILED) { \ rb_raise(rb_eIOError, "unmapped file"); \ } \ if ((t_modify & MM_MODIFY) && (i_mm->t->flag & MM_FROZEN)) { \ rb_error_frozen("mmap"); \ } VALUE mm_s_alloc(VALUE obj); VALUE mm_s_new(int argc, VALUE *argv, VALUE obj); VALUE mm_init(VALUE obj, VALUE fname); VALUE mm_aref_m(int argc, VALUE *argv, VALUE obj); VALUE mm_msync(int argc, VALUE *argv, VALUE obj); VALUE mm_unmap(VALUE obj); /* If memory is ever reallocated, any allocated Ruby strings that have not been * garbage collected need to be updated with the new region. If this isn't done, * iterating over the Ruby object space and accessing the string data will seg fault. */ void mm_update(VALUE obj); #endif prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/rendering.c000066400000000000000000000127701442423076500246000ustar00rootroot00000000000000#include "rendering.h" #include #include #include "file_parsing.h" #include "globals.h" #include "utils.h" #ifndef DBL_DECIMAL_DIG #define DBL_DECIMAL_DIG 17 #endif #define LABELS_START_OFFSET 4 static inline int is_valid(const jsmntok_t *token) { return token->start < token->end && token->start >= 0; } static inline int valid_not_null(const entry_t *entry, const jsmntok_t *token) { static const char null_s[] = "null"; if (!is_valid(token)) { return 0; } if (token->type != JSMN_PRIMITIVE) { return 1; } size_t token_len = token->end - token->start; if (token_len < sizeof(null_s) - 1) { return 1; } return strncmp(null_s, entry->json + token->start, sizeof(null_s) - 1) != 0; } static inline int append_token(VALUE string, const entry_t *entry, const jsmntok_t *token) { if (!is_valid(token)) { save_exception(prom_eParsingError, "parsing failed: %s", entry->json); return 0; } rb_str_cat(string, entry->json + token->start, token->end - token->start); return 1; } static int append_labels(VALUE string, const entry_t *entry, const int label_count, const jsmntok_t *tokens) { if (label_count <= 0) { if (is_pid_significant(entry)) { rb_str_cat(string, "{pid=\"", 6); rb_str_append(string, entry->pid); rb_str_cat(string, "\"}", 2); } return 1; } rb_str_cat(string, "{", 1); for (int i = 0; i < label_count; i++) { int key = LABELS_START_OFFSET + i; int val = LABELS_START_OFFSET + label_count + 1 + i; if (!append_token(string, entry, &tokens[key])) { return 0; } rb_str_cat(string, "=", 1); rb_str_cat(string, "\"", 1); if (valid_not_null(entry, &tokens[val])) { append_token(string, entry, &tokens[val]); } rb_str_cat(string, "\"", 1); if (i < label_count - 1) { rb_str_cat(string, ",", 1); } } if (is_pid_significant(entry)) { 
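// A gauge in a multiprocess mode such as "all" keeps one sample per process,
// so the worker pid is emitted as an additional label here.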
rb_str_cat(string, ",pid=\"", 6); rb_str_append(string, entry->pid); rb_str_cat(string, "\"", 1); } rb_str_cat(string, "}", 1); return 1; } static int validate_token_count(const int token_count, const entry_t *entry) { if (token_count < 0) { save_exception(prom_eParsingError, "too many labels or malformed json: %s", entry->json); return 0; } if (token_count < LABELS_START_OFFSET) { save_exception(prom_eParsingError, "malformed json: %s", entry->json); return 0; } if ((token_count - (LABELS_START_OFFSET + 1)) % 2 != 0) { save_exception(prom_eParsingError, "mismatched number of labels: %s", entry->json); return 0; } return 1; } static int append_entry(VALUE string, const entry_t *entry) { jsmn_parser parser; jsmn_init(&parser); jsmntok_t tokens[200]; jsmntok_t *name_token = &tokens[2]; int token_count = jsmn_parse(&parser, entry->json, entry->json_size, tokens, sizeof(tokens) / sizeof(tokens[0])); int label_count = (token_count - (LABELS_START_OFFSET + 1)) / 2; // // Example JSON "['metric', 'name',['label_a','label_b'],['value_a', 'value_b']]" // will be parsed into following token list: // // [ "'metric', 'name',['label_a','label_b'],['value_a', 'value_b']", // "metric", "name", // "['label_a','label_b']", "label_a", "label_b", // "['value_a', 'value_b']", "value_a", "value_b" ] // // where 'metric' is the name of the metric, while 'name' // is in summaries and histograms to store names of "submetrics" like: // histogram_name_bucket or histogram_name_sum if (!validate_token_count(token_count, entry)) { return 0; } if (!append_token(string, entry, name_token)) { return 0; } if (!append_labels(string, entry, label_count, tokens)) { return 0; } char value[255]; // print value with highest possible precision so that we do not lose any data int written = snprintf(value, sizeof(value), " %.*g\n", DBL_DECIMAL_DIG, entry->value); rb_str_cat(string, value, written); return 1; } static void append_entry_head(VALUE string, const entry_t *entry) { static const char help_beg[] = "# HELP "; static const char help_fin[] = " Multiprocess metric\n"; rb_str_cat(string, help_beg, sizeof(help_beg) - 1); rb_str_cat(string, entry->name, entry->name_len); rb_str_cat(string, help_fin, sizeof(help_fin) - 1); static const char type_beg[] = "# TYPE "; rb_str_cat(string, type_beg, sizeof(type_beg) - 1); rb_str_cat(string, entry->name, entry->name_len); rb_str_cat(string, " ", 1); rb_str_cat2(string, rb_id2name(entry->type)); rb_str_cat(string, "\n", 1); } static inline int entry_name_equal(const entry_t *a, const entry_t *b) { if (a == NULL || b == NULL) { return a == b; } if (a->name_len != b->name_len) { return 0; } return strncmp(a->name, b->name, a->name_len) == 0; } int entries_to_string(VALUE string, entry_t **sorted_entries, size_t entries_count) { entry_t *previous = NULL; for (size_t i = 0; i < entries_count; i++) { entry_t *entry = sorted_entries[i]; // when entry->name changes write metric header if (!entry_name_equal(previous, entry)) { previous = entry; append_entry_head(string, entry); } if (!append_entry(string, entry)) { return 0; } } return 1; } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/rendering.h000066400000000000000000000002621442423076500245760ustar00rootroot00000000000000#ifndef RENDERING_H #define RENDERING_H #include #include int entries_to_string(VALUE string, entry_t **sorted_entries, size_t entries_count); #endif prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/utils.c000066400000000000000000000031331442423076500237540ustar00rootroot00000000000000#include "utils.h" #include 
static void rb_save_exception(VALUE exception, VALUE message) { VALUE current_thread = rb_thread_current(); rb_thread_local_aset(current_thread, rb_intern("prometheus_last_exception"), exception); rb_thread_local_aset(current_thread, rb_intern("prometheus_last_exception_message"), message); } /* @deprecated - use with_exception ignoring return value */ void save_exception(VALUE exception, const char *fmt, ...) { va_list args; va_start(args, fmt); VALUE message = rb_vsprintf(fmt, args); rb_save_exception(exception, message); va_end(args); } int with_exception(VALUE exception, const char *fmt, ...) { va_list args; va_start(args, fmt); VALUE message = rb_vsprintf(fmt, args); rb_save_exception(exception, message); va_end(args); return FAILURE; } int with_exception_errno(VALUE exception, const char *fmt, ...) { va_list args; va_start(args, fmt); VALUE message = rb_vsprintf(fmt, args); rb_str_catf(message, " (%s)", strerror(errno)); rb_save_exception(exception, message); va_end(args); return FAILURE; } NORETURN(void raise_last_exception()) { VALUE current_thread = rb_thread_current(); VALUE exception = rb_thread_local_aref(current_thread, rb_intern("prometheus_last_exception")); VALUE message = rb_thread_local_aref(current_thread, rb_intern("prometheus_last_exception_message")); if (exception != Qnil) { rb_raise(exception, "%s", StringValueCStr(message)); } else { rb_raise(rb_eRuntimeError, "no exception found in thread local"); } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/utils.h000066400000000000000000000007711442423076500237660ustar00rootroot00000000000000#ifndef UNUSED_H #define UNUSED_H #include #ifdef UNUSED #elif defined(__GNUC__) #define UNUSED(x) UNUSED_##x __attribute__((unused)) #elif defined(__LCLINT__) #define UNUSED(x) /*@unused@*/ x #else #define UNUSED(x) x #endif #define SUCCESS 1 #define FAILURE 0 NORETURN(void raise_last_exception()); void save_exception(VALUE exception, const char *fmt, ...); int with_exception(VALUE exception, const char *fmt, ...); int with_exception_errno(VALUE exception, const char *fmt, ...); #endif prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/value_access.c000066400000000000000000000141411442423076500252520ustar00rootroot00000000000000#include "value_access.h" #include #include #include #include #include #include "file_format.h" #include "mmap.h" #include "utils.h" static void close_file(mm_ipc *i_mm) { close(i_mm->t->fd); i_mm->t->fd = -1; } static int open_and_extend_file(mm_ipc *i_mm, size_t len) { if (i_mm->t->fd < 0) { int fd; if ((fd = open(i_mm->t->path, i_mm->t->smode)) == -1) { return with_exception_errno(rb_eArgError, "%s: Can't open %s", __FILE__, i_mm->t->path); } i_mm->t->fd = fd; } if (lseek(i_mm->t->fd, len - 1, SEEK_SET) == -1) { close_file(i_mm); return with_exception_errno(rb_eIOError, "Can't lseek %zu", len - 1); } if (write(i_mm->t->fd, "\000", 1) != 1) { close_file(i_mm); return with_exception_errno(rb_eIOError, "Can't extend %s", i_mm->t->path); } i_mm->t->len = len; return SUCCESS; } static int perform_munmap(mm_ipc *i_mm) { if (i_mm->t->addr != NULL && munmap(i_mm->t->addr, i_mm->t->len)) { i_mm->t->addr = NULL; return with_exception_errno(rb_eArgError, "munmap failed"); } i_mm->t->addr = NULL; i_mm->t->len = 0; i_mm->t->real = 0; return SUCCESS; } static int perform_mmap(mm_ipc *i_mm, size_t len) { MMAP_RETTYPE addr = mmap(0, len, i_mm->t->pmode, i_mm->t->vscope, i_mm->t->fd, i_mm->t->offset); if (addr == MAP_FAILED) { return with_exception_errno(rb_eArgError, "mmap failed"); } i_mm->t->addr = addr; 
i_mm->t->len = len;
    i_mm->t->real = len;

    return SUCCESS;
}

static int expand(VALUE self, mm_ipc *i_mm, size_t len) {
    if (len < i_mm->t->len) {
        return with_exception(rb_eArgError, "Can't reduce the size of mmap");
    }

    if (!perform_munmap(i_mm)) {
        return FAILURE;
    }

    if (!open_and_extend_file(i_mm, len)) {
        return FAILURE;
    }

    if (!perform_mmap(i_mm, len)) {
        close_file(i_mm);
        return FAILURE;
    }

    if ((i_mm->t->flag & MM_LOCK) && mlock(i_mm->t->addr, len) == -1) {
        return with_exception_errno(rb_eArgError, "mlock(%d)", errno);
    }

    mm_update(self);
    return SUCCESS;
}

/* Entry layout on disk:
 *   uint32_t key_length
 *   char     key[key_length]   -- JSON-encoded metric name and labels
 *   char     padding[]         -- spaces, see padding_length() in file_format.h
 *   double   value
 */
static void save_entry(mm_ipc *i_mm, size_t offset, VALUE key, VALUE value) {
    uint32_t key_length = (uint32_t)RSTRING_LEN(key);

    char *pos = (char *)i_mm->t->addr + offset;
    memcpy(pos, &key_length, sizeof(uint32_t));
    pos += sizeof(uint32_t);

    memmove(pos, StringValuePtr(key), key_length);
    pos += key_length;

    memset(pos, ' ', padding_length(key_length));  // TODO: consider padding with \0
    pos += padding_length(key_length);

    double val = NUM2DBL(value);
    memcpy(pos, &val, sizeof(double));
}

static void save_value(mm_ipc *i_mm, VALUE _offset, VALUE value) {
    Check_Type(_offset, T_FIXNUM);
    size_t offset = NUM2UINT(_offset);
    // Reject offsets whose 8-byte value would extend past the initialized region.
    if (offset + sizeof(double) > i_mm->t->real) {
        rb_raise(rb_eIndexError, "offset %zu out of string", offset);
    }

    if (i_mm->t->flag & MM_FROZEN) {
        rb_error_frozen("mmap");
    }

    char *pos = (char *)i_mm->t->addr + offset;
    double val = NUM2DBL(value);
    memcpy(pos, &val, sizeof(double));
}

static VALUE load_value(mm_ipc *i_mm, VALUE _offset) {
    Check_Type(_offset, T_FIXNUM);
    size_t offset = NUM2UINT(_offset);
    // Same bound as save_value(): the whole double must lie inside the region.
    if (offset + sizeof(double) > i_mm->t->real) {
        rb_raise(rb_eIndexError, "offset %zu out of string", offset);
    }

    char *pos = (char *)i_mm->t->addr + offset;

    double value;
    memcpy(&value, pos, sizeof(double));
    return DBL2NUM(value);
}

uint32_t load_used(mm_ipc *i_mm) {
    uint32_t used = *((uint32_t *)i_mm->t->addr);
    if (used == 0) {
        used = START_POSITION;
    }
    return used;
}

void save_used(mm_ipc *i_mm, uint32_t used) { *((uint32_t *)i_mm->t->addr) = used; }

static VALUE initialize_entry(VALUE self, mm_ipc *i_mm, VALUE positions, VALUE key, VALUE value) {
    if (i_mm->t->flag & MM_FROZEN) {
        rb_error_frozen("mmap");
    }

    if (RSTRING_LEN(key) > INT32_MAX) {
        rb_raise(rb_eArgError, "string length gt %d", INT32_MAX);
    }

    uint32_t key_length = (uint32_t)RSTRING_LEN(key);
    uint32_t value_offset = sizeof(uint32_t) + key_length + padding_length(key_length);
    uint32_t entry_length = value_offset + sizeof(double);

    uint32_t used = load_used(i_mm);
    while (i_mm->t->len < (used + entry_length)) {
        if (!expand(self, i_mm, i_mm->t->len * 2)) {
            raise_last_exception();
        }
    }

    save_entry(i_mm, used, key, value);
    save_used(i_mm, used + entry_length);

    return rb_hash_aset(positions, key, INT2NUM(used + value_offset));
}

VALUE method_fetch_entry(VALUE self, VALUE positions, VALUE key, VALUE default_value) {
    Check_Type(positions, T_HASH);
    Check_Type(key, T_STRING);

    mm_ipc *i_mm;
    GET_MMAP(self, i_mm, MM_MODIFY);

    VALUE position = rb_hash_lookup(positions, key);
    if (position != Qnil) {
        return load_value(i_mm, position);
    }

    position = initialize_entry(self, i_mm, positions, key, default_value);
    return load_value(i_mm, position);
}

VALUE method_upsert_entry(VALUE self, VALUE positions, VALUE key, VALUE value) {
    Check_Type(positions, T_HASH);
    Check_Type(key, T_STRING);

    mm_ipc *i_mm;
    GET_MMAP(self, i_mm, MM_MODIFY);

    VALUE position = rb_hash_lookup(positions, key);
    if (position != Qnil) {
        save_value(i_mm, position, value);
        return load_value(i_mm, position);
    }

    position =
initialize_entry(self, i_mm, positions, key, value); return load_value(i_mm, position); } VALUE method_load_used(VALUE self) { mm_ipc *i_mm; GET_MMAP(self, i_mm, MM_MODIFY); return UINT2NUM(load_used(i_mm)); } VALUE method_save_used(VALUE self, VALUE value) { Check_Type(value, T_FIXNUM); mm_ipc *i_mm; GET_MMAP(self, i_mm, MM_MODIFY); if (i_mm->t->flag & MM_FROZEN) { rb_error_frozen("mmap"); } if (i_mm->t->len < INITIAL_SIZE) { if (!expand(self, i_mm, INITIAL_SIZE)) { raise_last_exception(); } } save_used(i_mm, NUM2UINT(value)); return value; } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file/value_access.h000066400000000000000000000005721442423076500252620ustar00rootroot00000000000000#ifndef VALUE_ACCESS_H #define VALUE_ACCESS_H #include VALUE method_load_used(VALUE self); VALUE method_save_used(VALUE self, VALUE value); VALUE method_get_double(VALUE self, VALUE index); VALUE method_fetch_entry(VALUE self, VALUE positions, VALUE key, VALUE default_value); VALUE method_upsert_entry(VALUE self, VALUE positions, VALUE key, VALUE value); #endifprometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/000077500000000000000000000000001442423076500231545ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/.cargo/000077500000000000000000000000001442423076500243255ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/.cargo/config.toml000066400000000000000000000012441442423076500264700ustar00rootroot00000000000000[target.aarch64-apple-darwin] # Without this flag, when linking static libruby, the linker removes symbols # (such as `_rb_ext_ractor_safe`) which it thinks are dead code... but they are # not, and they need to be included for the `embed` feature to work with static # Ruby. rustflags = [ "-C", "link-dead-code=on", "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", ] [target.x86_64-apple-darwin] rustflags = [ "-C", "link-dead-code=on", "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", ] [target.aarch64-unknown-linux-gnu] rustflags = [ "-C", "link-dead-code=on" ] [target.x86_64-unknown-linux-gnu] rustflags = [ "-C", "link-dead-code=on" ] prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/Cargo.lock000066400000000000000000000512311442423076500250630ustar00rootroot00000000000000# This file is automatically @generated by Cargo. # It is not intended for manual editing. 
version = 3 [[package]] name = "ahash" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ "cfg-if", "once_cell", "version_check", ] [[package]] name = "aho-corasick" version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "bindgen" version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" dependencies = [ "bitflags", "cexpr", "clang-sys", "lazy_static", "lazycell", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", ] [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "bstr" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3d4260bcc2e8fc9df1eac4919a720effeb63a3f0952f5bf4944adfa18897f09" dependencies = [ "memchr", "once_cell", "regex-automata", "serde", ] [[package]] name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cexpr" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ "nom", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clang-sys" version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", "libloading", ] [[package]] name = "cpufeatures" version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "digest" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer", "crypto-common", ] [[package]] name = "errno" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d6a0976c999d473fe89ad888d5a284e55366d9dc9038b1ba2aa15128c4afa0" dependencies = [ "errno-dragonfly", "libc", "windows-sys 0.45.0", ] [[package]] name = "errno-dragonfly" version = "0.1.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" dependencies = [ "cc", "libc", ] [[package]] name = "fast_mmaped_file_rs" version = "0.1.0" dependencies = [ "bstr", "hashbrown", "indoc", "libc", "magnus", "memmap2", "nix", "rand", "rb-sys", "sha2", "smallvec", "tempfile", "thiserror", ] [[package]] name = "fastrand" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "glob" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "hashbrown" version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ "ahash", ] [[package]] name = "hermit-abi" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" [[package]] name = "indoc" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f2cb48b81b1dc9f39676bf99f5499babfec7cd8fe14307f7b3d747208fb5690" [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] name = "io-lifetimes" version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" dependencies = [ "hermit-abi", "libc", "windows-sys 0.48.0", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "lazycell" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" version = "0.2.141" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" [[package]] name = "libloading" version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", "winapi", ] [[package]] name = "linux-raw-sys" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" [[package]] name = "magnus" version = "0.5.0" source = "git+https://github.com/matsadler/magnus?branch=main#b10aab48119eb87a872bf0bb4480b1fcebe5d1b9" dependencies = [ "magnus-macros", "rb-sys", 
"rb-sys-env", "seq-macro", ] [[package]] name = "magnus-macros" version = "0.4.0" source = "git+https://github.com/matsadler/magnus?branch=main#b10aab48119eb87a872bf0bb4480b1fcebe5d1b9" dependencies = [ "proc-macro2", "quote", "syn 2.0.13", ] [[package]] name = "memchr" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memmap2" version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ "libc", ] [[package]] name = "memoffset" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] [[package]] name = "minimal-lexical" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "nix" version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f346ff70e7dbfd675fe90590b92d59ef2de15a8779ae305ebcbfd3f0caf59be4" dependencies = [ "autocfg", "bitflags", "cfg-if", "libc", "memoffset", "pin-utils", ] [[package]] name = "nom" version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] [[package]] name = "once_cell" version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "peeking_take_while" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] [[package]] name = "quote" version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "rb-sys" version = 
"0.9.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "156bfedced1e236600bcaad538477097ff2ed5c6b474e411d15b791e1d24c0f1" dependencies = [ "rb-sys-build", ] [[package]] name = "rb-sys-build" version = "0.9.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cb2e4a32cbc290b543a74567072ad24b708aff7bb5dde5a68d5690379cd7938" dependencies = [ "bindgen", "lazy_static", "proc-macro2", "quote", "regex", "shell-words", "syn 1.0.109", ] [[package]] name = "rb-sys-env" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a35802679f07360454b418a5d1735c89716bde01d35b1560fc953c1415a0b3bb" [[package]] name = "redox_syscall" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags", ] [[package]] name = "regex" version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", "regex-syntax", ] [[package]] name = "regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" [[package]] name = "regex-syntax" version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "rustc-hash" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" version = "0.37.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aef160324be24d31a62147fae491c14d2204a3865c7ca8c3b0d7f7bcb3ea635" dependencies = [ "bitflags", "errno", "io-lifetimes", "libc", "linux-raw-sys", "windows-sys 0.48.0", ] [[package]] name = "seq-macro" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6b44e8fc93a14e66336d230954dda83d18b4605ccace8fe09bc7514a71ad0bc" [[package]] name = "serde" version = "1.0.159" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c04e8343c3daeec41f58990b9d77068df31209f2af111e059e9fe9646693065" [[package]] name = "sha2" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "shell-words" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shlex" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" [[package]] name = "smallvec" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4c9da457c5285ac1f936ebd076af6dac17a61cfe7826f2076b4d015cf47bc8ec" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "tempfile" version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", "fastrand", "redox_syscall", "rustix", "windows-sys 0.45.0", ] [[package]] name = "thiserror" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", "syn 2.0.13", ] [[package]] name = "typenum" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "unicode-ident" version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.2", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.0", ] [[package]] name = "windows-targets" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" 
dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", "windows_i686_gnu 0.48.0", "windows_i686_msvc 0.48.0", "windows_x86_64_gnu 0.48.0", "windows_x86_64_gnullvm 0.48.0", "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/Cargo.toml000066400000000000000000000016321442423076500251060ustar00rootroot00000000000000[package] name = "fast_mmaped_file_rs" version = "0.1.0" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] hashbrown = "0.13" libc = "0.2" magnus = { git = "https://github.com/matsadler/magnus", branch = "main", features = ["rb-sys"] } memmap2 = "0.5" # 
v0.26 cannot be built on CentOS 7 https://github.com/nix-rust/nix/issues/1972
nix = { version = "0.25", features = ["mman"] } # mman used for MsFlags
rb-sys = "0.9"
smallvec = "1.10"
thiserror = "1.0"

[dev-dependencies]
bstr = "1.4"
indoc = "2.0"
# We need the `embed` feature to run tests, but this triggers failures when building as a Gem.
magnus = { git = "https://github.com/matsadler/magnus", branch = "main", features = ["rb-sys","embed"] }
rand = "0.8"
sha2 = "0.10"
tempfile = "3.5"

[lib]
# Integration tests won't work if crate is only `cdylib`.
crate-type = ["cdylib","lib"]
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/README.md000066400000000000000000000030601442423076500244320ustar00rootroot00000000000000# Testing

## Running Tests

Use [cargo nextest](https://nexte.st/) to execute the Rust unit tests.

```sh
$ cargo nextest run
```

## Why not use 'cargo test'?

We need to embed Ruby into the test binary to access Ruby types. This requires us to run `magnus::embed::init()` no more than once before calling Ruby. See [the magnus docs](https://docs.rs/magnus/latest/magnus/embed/fn.init.html) for more details.

If we try to create separate `#[test]` functions that call `init()`, these will conflict, as Cargo runs tests in parallel using a single process with separate threads. Running `cargo test` will result in errors like:

```
---- file_info::test::with_ruby stdout ----
thread 'file_info::test::with_ruby' panicked at 'Ruby already initialized'
```

The simplest workaround for this is to avoid using `cargo test` to run unit tests. [nextest](https://nexte.st/) is an alternate test harness that runs each test as its own process, enabling each test to initialize Ruby without conflict.

## 'symbol not found' errors when running tests

If you see errors like the following when running tests:

```
Caused by:
  for `fast_mmaped_file_rs`, command `/Users/myuser/prometheus-client-mmap/ext/fast_mmaped_file_rs/target/debug/deps/fast_mmaped_file_rs-c81ccc96a6484e04 --list --format terse` exited with signal 6 (SIGABRT)
--- stdout:

--- stderr:
dyld[17861]: symbol not found in flat namespace '_rb_cArray'
```

Clearing the build cache will resolve the problem.

```sh
$ cargo clean
```

This is probably due to separate features being used with `magnus` in development builds.
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/extconf.rb000066400000000000000000000014501442423076500251470ustar00rootroot00000000000000require "mkmf"
require "rb_sys/mkmf"

if find_executable('rustc')
  create_rust_makefile("fast_mmaped_file_rs") do |r|
    r.auto_install_rust_toolchain = false

    if enable_config('fail-on-warning')
      r.extra_rustflags = ["-Dwarnings"]
    end

    if enable_config('debug')
      r.profile = :dev
    end

    if enable_config('address-sanitizer')
      r.extra_rustflags = ["-Zsanitizer=address"]
    end

    # `rb_sys/mkmf` passes all arguments after `--` directly to `cargo rustc`.
    # We use this awful hack to keep compatibility with existing flags used by
    # the C implementation.
    trimmed_argv = ARGV.take_while { |arg| arg != "--" }
    ARGV = trimmed_argv
  end
else
  puts 'rustc not found, skipping Rust extension.'
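  # A stub Makefile gives `make` a no-op target, so installation can still
  # proceed on hosts where the Rust extension is skipped.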
  File.write('Makefile', dummy_makefile($srcdir).join(''))
end
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/000077500000000000000000000000001442423076500237435ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/error.rs000066400000000000000000000133211442423076500254420ustar00rootroot00000000000000use magnus::{exception, Ruby};
use std::any;
use std::fmt::Display;
use std::io;
use std::path::Path;
use thiserror::Error;

use crate::util;
use crate::PROM_EPARSING_ERROR;

/// A lightweight representation of Ruby ExceptionClasses.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum RubyError {
    Arg,
    Encoding,
    Frozen,
    Index,
    Io,
    NoMem,
    PromParsing,
    Runtime,
    Type,
}

impl From<RubyError> for magnus::ExceptionClass {
    fn from(err: RubyError) -> magnus::ExceptionClass {
        match err {
            RubyError::Arg => exception::arg_error(),
            RubyError::Encoding => exception::encoding_error(),
            RubyError::Frozen => exception::frozen_error(),
            RubyError::Index => exception::index_error(),
            RubyError::Io => exception::io_error(),
            RubyError::NoMem => exception::no_mem_error(),
            RubyError::Runtime => exception::runtime_error(),
            RubyError::PromParsing => {
                // UNWRAP: this will panic if called outside of a Ruby thread.
                let ruby = Ruby::get().unwrap();
                ruby.get_inner(&PROM_EPARSING_ERROR)
            }
            RubyError::Type => exception::type_error(),
        }
    }
}

/// Errors returned internally within the crate. Methods called directly by Ruby return
/// `magnus::error::Error` as do functions that interact heavily with Ruby. This can be
/// converted into a `magnus::error::Error` at the boundary between Rust and Ruby.
#[derive(PartialEq, Error, Debug)]
pub enum MmapError {
    /// A read or write was made while another thread had mutable access to the mmap.
    #[error("read/write operation attempted while mmap was being written to")]
    ConcurrentAccess,
    /// An error message used to exactly match the messages returned by the C
    /// implementation.
    #[error("{0}")]
    Legacy(String, RubyError),
    /// A String had invalid UTF-8 sequences.
    #[error("{0}")]
    Encoding(String),
    /// A failed attempt to cast an integer from one type to another.
    #[error("failed to cast {object_name} {value} from {from} to {to}")]
    FailedCast {
        from: &'static str,
        to: &'static str,
        value: String,
        object_name: String,
    },
    /// The mmap was frozen when a mutable operation was attempted.
    #[error("mmap")]
    Frozen,
    /// An io operation failed.
    #[error("failed to {operation} path '{path}': {err}")]
    Io {
        operation: String,
        path: String,
        err: String,
    },
    #[error("string length gt {}", i32::MAX)]
    KeyLength,
    /// Failed to allocate memory.
    #[error("Couldn't allocate for {0} memory")]
    OutOfMemory(usize),
    /// A memory operation fell outside of the container's bounds.
    #[error("offset {index} out of bounds of len {len}")]
    OutOfBounds { index: String, len: String },
    /// A numeric operation overflowed.
    #[error("overflow when {op} {value} and {added} of type {ty}")]
    Overflow {
        value: String,
        added: String,
        op: String,
        ty: &'static str,
    },
    /// A miscellaneous error.
    #[error("{0}")]
    Other(String),
    /// A failure when parsing a `.db` file containing Prometheus metrics.
    #[error("{0}")]
    PromParsing(String),
    /// No mmap open.
    #[error("unmapped file")]
    UnmappedFile,
    /// A custom error message with `strerror(3)` appended.
    #[error("{0}")]
    WithErrno(String),
}

impl MmapError {
    pub fn legacy<T: Into<String>>(msg: T, ruby_err: RubyError) -> Self {
        MmapError::Legacy(msg.into(), ruby_err)
    }

    pub fn failed_cast<T: Display, U>(value: T, object_name: &str) -> Self {
        MmapError::FailedCast {
            from: any::type_name::<T>(),
            to: any::type_name::<U>(),
            value: value.to_string(),
            object_name: object_name.to_string(),
        }
    }

    pub fn io(operation: &str, path: &Path, err: io::Error) -> Self {
        MmapError::Io {
            operation: operation.to_string(),
            path: path.display().to_string(),
            err: err.to_string(),
        }
    }

    pub fn overflowed<T: Display>(value: T, added: T, op: &str) -> Self {
        MmapError::Overflow {
            value: value.to_string(),
            added: added.to_string(),
            op: op.to_string(),
            ty: any::type_name::<T>(),
        }
    }

    pub fn out_of_bounds<T: Display>(index: T, len: T) -> Self {
        MmapError::OutOfBounds {
            index: index.to_string(),
            len: len.to_string(),
        }
    }

    pub fn with_errno<T: Into<String>>(msg: T) -> Self {
        let strerror = util::strerror(util::errno());
        MmapError::WithErrno(format!("{}: ({strerror})", msg.into()))
    }

    pub fn ruby_err(&self) -> RubyError {
        match self {
            MmapError::ConcurrentAccess => RubyError::Arg,
            MmapError::Legacy(_, e) => *e,
            MmapError::Encoding(_) => RubyError::Encoding,
            MmapError::Io { .. } => RubyError::Io,
            MmapError::FailedCast { .. } => RubyError::Arg,
            MmapError::Frozen => RubyError::Frozen,
            MmapError::KeyLength => RubyError::Arg,
            MmapError::Overflow { .. } => RubyError::Arg,
            MmapError::OutOfBounds { .. } => RubyError::Index,
            MmapError::OutOfMemory { .. } => RubyError::NoMem,
            MmapError::Other(_) => RubyError::Arg,
            MmapError::PromParsing(_) => RubyError::PromParsing,
            MmapError::UnmappedFile => RubyError::Io,
            MmapError::WithErrno(_) => RubyError::Io,
        }
    }
}

impl From<MmapError> for magnus::error::Error {
    fn from(err: MmapError) -> magnus::error::Error {
        magnus::error::Error::new(err.ruby_err().into(), err.to_string())
    }
}
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/file_entry.rs000066400000000000000000000503001442423076500264500ustar00rootroot00000000000000use magnus::{StaticSymbol, Symbol};
use std::fmt::Write;
use std::str;

use crate::error::{MmapError, RubyError};
use crate::file_info::FileInfo;
use crate::parser::{self, MetricText};
use crate::raw_entry::RawEntry;
use crate::Result;
use crate::{SYM_GAUGE, SYM_LIVESUM, SYM_MAX, SYM_MIN};

/// A metrics entry extracted from a `*.db` file.
#[derive(Clone, Debug)]
pub struct FileEntry {
    pub data: EntryData,
    pub meta: EntryMetadata,
}

/// The primary data payload for a `FileEntry`, the JSON string and the
/// associated pid, if significant. Used as the key for `EntryMap`.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct EntryData {
    pub json: String,
    pub pid: Option<String>,
}

impl<'a> PartialEq<BorrowedData<'a>> for EntryData {
    fn eq(&self, other: &BorrowedData) -> bool {
        self.pid.as_deref() == other.pid && self.json == other.json
    }
}

impl<'a> TryFrom<BorrowedData<'a>> for EntryData {
    type Error = MmapError;

    fn try_from(borrowed: BorrowedData) -> Result<Self> {
        let mut json = String::new();
        if json.try_reserve_exact(borrowed.json.len()).is_err() {
            return Err(MmapError::OutOfMemory(borrowed.json.len()));
        }
        json.push_str(borrowed.json);

        Ok(Self {
            json,
            // Don't bother checking for allocation failure, typically ~10 bytes
            pid: borrowed.pid.map(|p| p.to_string()),
        })
    }
}

/// A borrowed copy of the JSON string and pid for a `FileEntry`. We use this
/// to check if a given string/pid combination is present in the `EntryMap`,
/// copying them to owned values only when needed.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct BorrowedData<'a> {
    pub json: &'a str,
    pub pid: Option<&'a str>,
}

impl<'a> BorrowedData<'a> {
    pub fn new(
        raw_entry: &'a RawEntry,
        file_info: &'a FileInfo,
        pid_significant: bool,
    ) -> Result<Self> {
        let json = str::from_utf8(raw_entry.json())
            .map_err(|e| MmapError::Encoding(format!("invalid UTF-8 in entry JSON: {e}")))?;

        let pid = if pid_significant {
            Some(file_info.pid.as_str())
        } else {
            None
        };

        Ok(Self { json, pid })
    }
}

/// The metadata associated with a `FileEntry`. The value in `EntryMap`.
#[derive(Clone, Debug)]
pub struct EntryMetadata {
    pub multiprocess_mode: Symbol,
    pub type_: StaticSymbol,
    pub value: f64,
}

impl EntryMetadata {
    /// Construct a new `FileEntry`, copying the JSON string from the `RawEntry`
    /// into an internal buffer.
    pub fn new(mmap_entry: &RawEntry, file: &FileInfo) -> Result<Self> {
        let value = mmap_entry.value();

        Ok(EntryMetadata {
            multiprocess_mode: file.multiprocess_mode,
            type_: file.type_,
            value,
        })
    }

    /// Combine values with another `EntryMetadata`.
    pub fn merge(&mut self, other: &Self) {
        if self.type_ == SYM_GAUGE {
            match self.multiprocess_mode {
                s if s == SYM_MIN => self.value = self.value.min(other.value),
                s if s == SYM_MAX => self.value = self.value.max(other.value),
                s if s == SYM_LIVESUM => self.value += other.value,
                _ => self.value = other.value,
            }
        } else {
            self.value += other.value;
        }
    }

    /// Validate if pid is significant for metric.
    pub fn is_pid_significant(&self) -> bool {
        let mp = self.multiprocess_mode;

        self.type_ == SYM_GAUGE && !(mp == SYM_MIN || mp == SYM_MAX || mp == SYM_LIVESUM)
    }
}

impl FileEntry {
    /// Convert the sorted entries into a String in Prometheus metrics format.
    pub fn entries_to_string(entries: Vec<FileEntry>) -> Result<String> {
        // We guesstimate that lines are ~100 bytes long, preallocate the string to
        // roughly that size.
        let mut out = String::new();
        out.try_reserve(entries.len() * 128)
            .map_err(|_| MmapError::OutOfMemory(entries.len() * 128))?;

        let mut prev_name: Option<String> = None;

        let entry_count = entries.len();
        let mut processed_count = 0;

        for entry in entries {
            let metrics_data = match parser::parse_metrics(&entry.data.json) {
                Some(m) => m,
                // We don't exit the function here so the total number of invalid
                // entries can be calculated below.
None => continue, }; match prev_name.as_ref() { Some(p) if p == metrics_data.family_name => {} _ => { entry.append_header(metrics_data.family_name, &mut out); prev_name = Some(metrics_data.family_name.to_owned()); } } entry.append_entry(metrics_data, &mut out)?; writeln!(&mut out, " {}", entry.meta.value) .map_err(|e| MmapError::Other(format!("Failed to append to output: {e}")))?; processed_count += 1; } if processed_count != entry_count { return Err(MmapError::legacy( format!("Processed entries {processed_count} != map entries {entry_count}"), RubyError::Runtime, )); } Ok(out) } fn append_header(&self, family_name: &str, out: &mut String) { out.push_str("# HELP "); out.push_str(family_name); out.push_str(" Multiprocess metric\n"); out.push_str("# TYPE "); out.push_str(family_name); out.push(' '); out.push_str(self.meta.type_.name().expect("name was invalid UTF-8")); out.push('\n'); } fn append_entry(&self, json_data: MetricText, out: &mut String) -> Result<()> { out.push_str(json_data.metric_name); if json_data.labels.is_empty() { if let Some(pid) = self.data.pid.as_ref() { out.push_str("{pid=\""); out.push_str(pid); out.push_str("\"}"); } return Ok(()); } out.push('{'); let it = json_data.labels.iter().zip(json_data.values.iter()); for (i, (&key, &val)) in it.enumerate() { out.push_str(key); out.push_str("=\""); // `null` values will be output as `""`. if val != "null" { out.push_str(val); } out.push('"'); if i < json_data.labels.len() - 1 { out.push(','); } } if let Some(pid) = self.data.pid.as_ref() { out.push_str(",pid=\""); out.push_str(pid); out.push('"'); } out.push('}'); Ok(()) } } #[cfg(test)] mod test { use bstr::BString; use indoc::indoc; use super::*; use crate::file_info::FileInfo; use crate::raw_entry::RawEntry; use crate::testhelper::{TestEntry, TestFile}; #[test] fn test_entries_to_string() { struct TestCase { name: &'static str, multiprocess_mode: &'static str, json: &'static [&'static str], values: &'static [f64], pids: &'static [&'static str], expected_out: Option<&'static str>, expected_err: Option, } let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let tc = vec![ TestCase { name: "one metric, pid significant", multiprocess_mode: "all", json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#], values: &[1.0], pids: &["worker-1"], expected_out: Some(indoc! {r##"# HELP family Multiprocess metric # TYPE family gauge name{label_a="value_a",label_b="value_b",pid="worker-1"} 1 "##}), expected_err: None, }, TestCase { name: "one metric, no pid", multiprocess_mode: "min", json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#], values: &[1.0], pids: &["worker-1"], expected_out: Some(indoc! {r##"# HELP family Multiprocess metric # TYPE family gauge name{label_a="value_a",label_b="value_b"} 1 "##}), expected_err: None, }, TestCase { name: "floating point shown", multiprocess_mode: "min", json: &[r#"["family","name",["label_a","label_b"],["value_a","value_b"]]"#], values: &[1.5], pids: &["worker-1"], expected_out: Some(indoc! {r##"# HELP family Multiprocess metric # TYPE family gauge name{label_a="value_a",label_b="value_b"} 1.5 "##}), expected_err: None, }, TestCase { name: "no labels, pid significant", multiprocess_mode: "all", json: &[r#"["family","name",[],[]]"#], values: &[1.0], pids: &["worker-1"], expected_out: Some(indoc! 
{r##"# HELP family Multiprocess metric # TYPE family gauge name{pid="worker-1"} 1 "##}), expected_err: None, }, TestCase { name: "no labels, no pid", multiprocess_mode: "min", json: &[r#"["family","name",[],[]]"#], values: &[1.0], pids: &["worker-1"], expected_out: Some(indoc! {r##"# HELP family Multiprocess metric # TYPE family gauge name 1 "##}), expected_err: None, }, TestCase { name: "two metrics, same family, pid significant", multiprocess_mode: "all", json: &[ r#"["family","first",["label_a","label_b"],["value_a","value_b"]]"#, r#"["family","second",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0, 2.0], pids: &["worker-1", "worker-1"], expected_out: Some(indoc! {r##"# HELP family Multiprocess metric # TYPE family gauge first{label_a="value_a",label_b="value_b",pid="worker-1"} 1 second{label_a="value_a",label_b="value_b",pid="worker-1"} 2 "##}), expected_err: None, }, TestCase { name: "two metrics, different family, pid significant", multiprocess_mode: "min", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, r#"["second_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0, 2.0], pids: &["worker-1", "worker-1"], expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric # TYPE first_family gauge first_name{label_a="value_a",label_b="value_b"} 1 # HELP second_family Multiprocess metric # TYPE second_family gauge second_name{label_a="value_a",label_b="value_b"} 2 "##}), expected_err: None, }, TestCase { name: "three metrics, two different families, pid significant", multiprocess_mode: "all", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, r#"["first_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#, r#"["second_family","second_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0, 2.0, 3.0], pids: &["worker-1", "worker-1", "worker-1"], expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric # TYPE first_family gauge first_name{label_a="value_a",label_b="value_b",pid="worker-1"} 1 second_name{label_a="value_a",label_b="value_b",pid="worker-1"} 2 # HELP second_family Multiprocess metric # TYPE second_family gauge second_name{label_a="value_a",label_b="value_b",pid="worker-1"} 3 "##}), expected_err: None, }, TestCase { name: "same metrics, pid significant, separate workers", multiprocess_mode: "all", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0, 2.0], pids: &["worker-1", "worker-2"], expected_out: Some(indoc! {r##"# HELP first_family Multiprocess metric # TYPE first_family gauge first_name{label_a="value_a",label_b="value_b",pid="worker-1"} 1 first_name{label_a="value_a",label_b="value_b",pid="worker-2"} 2 "##}), expected_err: None, }, TestCase { name: "same metrics, pid not significant, separate workers", multiprocess_mode: "max", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0, 2.0], pids: &["worker-1", "worker-2"], expected_out: Some(indoc! 
{r##"# HELP first_family Multiprocess metric # TYPE first_family gauge first_name{label_a="value_a",label_b="value_b"} 1 first_name{label_a="value_a",label_b="value_b"} 2 "##}), expected_err: None, }, TestCase { name: "entry fails to parse", multiprocess_mode: "min", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, r#"[not valid"#, ], values: &[1.0, 2.0], pids: &["worker-1", "worker-1"], expected_out: None, expected_err: Some(MmapError::legacy( "Processed entries 1 != map entries 2".to_owned(), RubyError::Runtime, )), }, ]; for case in tc { let name = case.name; let input_bytes: Vec = case .json .iter() .zip(case.values) .map(|(&s, &value)| TestEntry::new(s, value).as_bstring()) .collect(); let mut file_infos = Vec::new(); for pid in case.pids { let TestFile { file, path, dir: _dir, } = TestFile::new(b"foobar"); let info = FileInfo { file, path, len: case.json.len(), multiprocess_mode: Symbol::new(case.multiprocess_mode), type_: StaticSymbol::new("gauge"), pid: pid.to_string(), }; file_infos.push(info); } let file_entries: Vec = input_bytes .iter() .map(|s| RawEntry::from_slice(s).unwrap()) .zip(file_infos) .map(|(entry, info)| { let meta = EntryMetadata::new(&entry, &info).unwrap(); let borrowed = BorrowedData::new(&entry, &info, meta.is_pid_significant()).unwrap(); let data = EntryData::try_from(borrowed).unwrap(); FileEntry { data, meta } }) .collect(); let output = FileEntry::entries_to_string(file_entries); if let Some(expected_out) = case.expected_out { assert_eq!( expected_out, output.as_ref().unwrap(), "test case: {name} - output" ); } if let Some(expected_err) = case.expected_err { assert_eq!( expected_err, output.unwrap_err(), "test case: {name} - error" ); } } } #[test] fn test_merge() { struct TestCase { name: &'static str, metric_type: &'static str, multiprocess_mode: &'static str, values: &'static [f64], expected_value: f64, } let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let tc = vec![ TestCase { name: "gauge max", metric_type: "gauge", multiprocess_mode: "max", values: &[1.0, 5.0], expected_value: 5.0, }, TestCase { name: "gauge min", metric_type: "gauge", multiprocess_mode: "min", values: &[1.0, 5.0], expected_value: 1.0, }, TestCase { name: "gauge livesum", metric_type: "gauge", multiprocess_mode: "livesum", values: &[1.0, 5.0], expected_value: 6.0, }, TestCase { name: "gauge all", metric_type: "gauge", multiprocess_mode: "all", values: &[1.0, 5.0], expected_value: 5.0, }, TestCase { name: "not a gauge", metric_type: "histogram", multiprocess_mode: "max", values: &[1.0, 5.0], expected_value: 6.0, }, ]; for case in tc { let name = case.name; let json = r#"["family","metric",["label_a","label_b"],["value_a","value_b"]]"#; let TestFile { file, path, dir: _dir, } = TestFile::new(b"foobar"); let info = FileInfo { file, path, len: json.len(), multiprocess_mode: Symbol::new(case.multiprocess_mode), type_: StaticSymbol::new(case.metric_type), pid: "worker-1".to_string(), }; let input_bytes: Vec = case .values .iter() .map(|&value| TestEntry::new(json, value).as_bstring()) .collect(); let entries: Vec = input_bytes .iter() .map(|s| RawEntry::from_slice(s).unwrap()) .map(|entry| { let meta = EntryMetadata::new(&entry, &info).unwrap(); let borrowed = BorrowedData::new(&entry, &info, meta.is_pid_significant()).unwrap(); let data = EntryData::try_from(borrowed).unwrap(); FileEntry { data, meta } }) .collect(); let mut entry_a = entries[0].clone(); let entry_b = 
entries[1].clone(); entry_a.meta.merge(&entry_b.meta); assert_eq!( case.expected_value, entry_a.meta.value, "test case: {name} - value" ); } } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/file_info.rs000066400000000000000000000131571442423076500262520ustar00rootroot00000000000000use magnus::exception::*; use magnus::{Error, RString, StaticSymbol, Symbol, Value}; use std::ffi::OsString; use std::fs::File; use std::io::{self, Read, Seek}; use std::os::unix::ffi::OsStringExt; use std::path::PathBuf; use crate::err; use crate::error::{MmapError, RubyError}; use crate::util; use crate::Result; /// The details of a `*.db` file. #[derive(Debug)] pub struct FileInfo { pub file: File, pub path: PathBuf, pub len: usize, pub multiprocess_mode: Symbol, pub type_: StaticSymbol, pub pid: String, } impl FileInfo { /// Receive the details of a file from Ruby and store as a `FileInfo`. pub fn open_from_params(params: &[Value; 4]) -> magnus::error::Result { if params.len() != 4 { return Err(err!( arg_error(), "wrong number of arguments {} instead of 4", params.len() )); } let filepath = RString::from_value(params[0]) .ok_or_else(|| err!(arg_error(), "can't convert filepath to String"))?; // SAFETY: We immediately copy the string buffer from Ruby, preventing // it from being mutated out from under us. let path_bytes: Vec<_> = unsafe { filepath.as_slice().to_owned() }; let path = PathBuf::from(OsString::from_vec(path_bytes)); let mut file = File::open(&path).map_err(|_| { err!( arg_error(), "Can't open {}, errno: {}", path.display(), util::errno() ) })?; let stat = file .metadata() .map_err(|_| err!(io_error(), "Can't stat file, errno: {}", util::errno()))?; let length = util::cast_chk::<_, usize>(stat.len(), "file size")?; let multiprocess_mode = Symbol::from_value(params[1]) .ok_or_else(|| err!(arg_error(), "expected multiprocess_mode to be a symbol"))?; let type_ = StaticSymbol::from_value(params[2]) .ok_or_else(|| err!(arg_error(), "expected file type to be a symbol"))?; let pid = RString::from_value(params[3]) .ok_or_else(|| err!(arg_error(), "expected pid to be a String"))?; file.rewind() .map_err(|_| err!(io_error(), "Can't fseek 0, errno: {}", util::errno()))?; Ok(Self { file, path, len: length, multiprocess_mode, type_, pid: pid.to_string()?, }) } /// Read the contents of the associated file into the buffer provided by /// the caller. 
pub fn read_from_file(&mut self, buf: &mut Vec) -> Result<()> { buf.clear(); buf.try_reserve(self.len).map_err(|_| { MmapError::legacy( format!("Can't malloc {}, errno: {}", self.len, util::errno()), RubyError::Io, ) })?; match self.file.read_to_end(buf) { Ok(n) if n == self.len => Ok(()), Ok(_) => Err(MmapError::io( "read", &self.path, io::Error::from(io::ErrorKind::UnexpectedEof), )), Err(e) => Err(MmapError::io("read", &self.path, e)), } } } #[cfg(test)] mod test { use magnus::{eval, RArray, StaticSymbol, Symbol}; use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; use super::*; use crate::testhelper::TestFile; #[test] fn test_open_from_params() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let file_data = b"foobar"; let TestFile { file: _file, path, dir: _dir, } = TestFile::new(file_data); let pid = "worker-1_0"; let args = RArray::from_value( eval(&format!("['{}', :max, :gauge, '{pid}']", path.display())).unwrap(), ) .unwrap(); let arg0 = args.shift().unwrap(); let arg1 = args.shift().unwrap(); let arg2 = args.shift().unwrap(); let arg3 = args.shift().unwrap(); let out = FileInfo::open_from_params(&[arg0, arg1, arg2, arg3]); assert!(out.is_ok()); let out = out.unwrap(); assert_eq!(out.path, path); assert_eq!(out.len, file_data.len()); assert_eq!(out.multiprocess_mode, Symbol::new("max")); assert_eq!(out.type_, Symbol::new("gauge")); assert_eq!(out.pid, pid); } #[test] fn test_read_from_file() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); const BUF_LEN: usize = 1 << 20; // 1MiB // Create a buffer with random data. let mut buf = vec![0u8; BUF_LEN]; thread_rng().fill(buf.as_mut_slice()); let TestFile { file, path, dir: _dir, } = TestFile::new(&buf); let mut info = FileInfo { file, path, len: buf.len(), multiprocess_mode: Symbol::new("puma"), type_: StaticSymbol::new("max"), pid: "worker-0_0".to_string(), }; let mut out_buf = Vec::new(); info.read_from_file(&mut out_buf).unwrap(); assert_eq!(buf.len(), out_buf.len(), "buffer lens"); let mut in_hasher = Sha256::new(); in_hasher.update(&buf); let in_hash = in_hasher.finalize(); let mut out_hasher = Sha256::new(); out_hasher.update(&out_buf); let out_hash = out_hasher.finalize(); assert_eq!(in_hash, out_hash, "content hashes"); } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/lib.rs000066400000000000000000000052571442423076500250700ustar00rootroot00000000000000use magnus::exception::*; use magnus::prelude::*; use magnus::value::{Fixnum, Lazy, LazyId}; use magnus::{class, define_class, exception, function, method, Ruby}; use std::mem::size_of; use crate::mmap::MmapedFile; pub mod error; pub mod file_entry; pub mod file_info; mod macros; pub mod map; pub mod mmap; pub mod parser; pub mod raw_entry; pub mod util; #[cfg(test)] mod testhelper; type Result = std::result::Result; const MAP_SHARED: i64 = libc::MAP_SHARED as i64; const HEADER_SIZE: usize = 2 * size_of::(); static SYM_GAUGE: LazyId = LazyId::new("gauge"); static SYM_MIN: LazyId = LazyId::new("min"); static SYM_MAX: LazyId = LazyId::new("max"); static SYM_LIVESUM: LazyId = LazyId::new("livesum"); static SYM_PID: LazyId = LazyId::new("pid"); static SYM_SAMPLES: LazyId = LazyId::new("samples"); static PROM_EPARSING_ERROR: Lazy = Lazy::new(|_| { let prom_err = define_class( "PrometheusParsingError", exception::runtime_error().as_r_class(), ) .expect("failed to create class `PrometheusParsingError`"); 
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/lib.rs000066400000000000000000000052571442423076500250660ustar00rootroot00000000000000use magnus::exception::*;
use magnus::prelude::*;
use magnus::value::{Fixnum, Lazy, LazyId};
use magnus::{class, define_class, exception, function, method, Ruby};
use std::mem::size_of;

use crate::mmap::MmapedFile;

pub mod error;
pub mod file_entry;
pub mod file_info;
mod macros;
pub mod map;
pub mod mmap;
pub mod parser;
pub mod raw_entry;
pub mod util;

#[cfg(test)]
mod testhelper;

pub type Result<T> = std::result::Result<T, error::MmapError>;

const MAP_SHARED: i64 = libc::MAP_SHARED as i64;
const HEADER_SIZE: usize = 2 * size_of::<u32>();

static SYM_GAUGE: LazyId = LazyId::new("gauge");
static SYM_MIN: LazyId = LazyId::new("min");
static SYM_MAX: LazyId = LazyId::new("max");
static SYM_LIVESUM: LazyId = LazyId::new("livesum");
static SYM_PID: LazyId = LazyId::new("pid");
static SYM_SAMPLES: LazyId = LazyId::new("samples");

static PROM_EPARSING_ERROR: Lazy<ExceptionClass> = Lazy::new(|_| {
    let prom_err = define_class(
        "PrometheusParsingError",
        exception::runtime_error().as_r_class(),
    )
    .expect("failed to create class `PrometheusParsingError`");
    ExceptionClass::from_value(prom_err.as_value())
        .expect("failed to create exception class from `PrometheusParsingError`")
});

#[magnus::init]
fn init(ruby: &Ruby) -> magnus::error::Result<()> {
    // Initialize the static symbols
    LazyId::force(&SYM_GAUGE, ruby);
    LazyId::force(&SYM_MIN, ruby);
    LazyId::force(&SYM_MAX, ruby);
    LazyId::force(&SYM_LIVESUM, ruby);
    LazyId::force(&SYM_PID, ruby);
    LazyId::force(&SYM_SAMPLES, ruby);

    // Initialize `PrometheusParsingError` class.
    Lazy::force(&PROM_EPARSING_ERROR, ruby);

    let klass = define_class("FastMmapedFileRs", class::object())?;

    klass.undef_default_alloc_func();

    // UNWRAP: We know `MAP_SHARED` fits in a `Fixnum`.
    klass.const_set("MAP_SHARED", Fixnum::from_i64(MAP_SHARED).unwrap())?;

    klass.define_singleton_method("to_metrics", function!(MmapedFile::to_metrics, 1))?;

    // Required for subclassing to work
    klass.define_alloc_func::<MmapedFile>();
    klass.define_singleton_method("new", method!(MmapedFile::new, -1))?;
    klass.define_method("initialize", method!(MmapedFile::initialize, 1))?;
    klass.define_method("slice", method!(MmapedFile::slice, -1))?;
    klass.define_method("sync", method!(MmapedFile::sync, -1))?;
    klass.define_method("munmap", method!(MmapedFile::munmap, 0))?;

    klass.define_method("used", method!(MmapedFile::load_used, 0))?;
    klass.define_method("used=", method!(MmapedFile::save_used, 1))?;
    klass.define_method("fetch_entry", method!(MmapedFile::fetch_entry, 3))?;
    klass.define_method("upsert_entry", method!(MmapedFile::upsert_entry, 3))?;

    Ok(())
}
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/macros.rs000066400000000000000000000005471442423076500256030ustar00rootroot00000000000000#[macro_export]
macro_rules! err {
    (with_errno: $err_t:expr, $($arg:expr),*) => {
        {
            let err = format!($($arg),*);
            let strerror = strerror(errno());
            Error::new($err_t, format!("{err} ({strerror})"))
        }
    };

    ($err_t:expr, $($arg:expr),*) => {
        Error::new($err_t, format!($($arg),*))
    };
}
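// Illustrative addition, not part of the original source: how the two arms
// above are used elsewhere in this crate. The plain arm wraps `Error::new`
// with `format!`; the `with_errno:` arm additionally appends the C-level
// error string, so it should be invoked right after the failing syscall,
// while `errno` is still meaningful. Both arms assume `Error` (and, for the
// second, `strerror`/`errno` from `crate::util`) are in scope at the call
// site. The variable names below are hypothetical:
//
//     return Err(err!(arg_error(), "wrong number of arguments {} instead of 4", n));
//
//     return Err(err!(with_errno: io_error(), "Can't lseek {}", offset));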
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/map.rs000066400000000000000000000405001442423076500250650ustar00rootroot00000000000000use hashbrown::hash_map::RawEntryMut;
use hashbrown::HashMap;
use magnus::{exception::*, Error, RArray};
use std::hash::{BuildHasher, Hash, Hasher};
use std::mem::size_of;

use crate::error::MmapError;
use crate::file_entry::{BorrowedData, EntryData, EntryMetadata, FileEntry};
use crate::file_info::FileInfo;
use crate::raw_entry::RawEntry;
use crate::util::read_u32;
use crate::Result;
use crate::{err, HEADER_SIZE};

/// A HashMap of JSON strings and their associated metadata.
/// Used to print metrics in text format.
///
/// The map key is the entry's JSON string and an optional pid string. The latter
/// allows us to have multiple entries on the map for multiple pids using the
/// same string.
#[derive(Default, Debug)]
pub struct EntryMap(HashMap<EntryData, EntryMetadata>);

impl EntryMap {
    /// Construct a new EntryMap.
    pub fn new() -> Self {
        Self(HashMap::new())
    }

    /// Given a list of files, read each one into memory and parse the metrics it contains.
    pub fn aggregate_files(&mut self, list_of_files: RArray) -> magnus::error::Result<()> {
        // Pre-allocate the `HashMap` and validate we don't OOM. The C implementation
        // ignores allocation failures here. We perform this check to avoid potential
        // panics. We assume ~1,000 entries per file, so 72 KiB allocated per file.
        self.0
            .try_reserve(list_of_files.len() * 1024)
            .map_err(|_| {
                err!(
                    no_mem_error(),
                    "Couldn't allocate for {} memory",
                    size_of::<FileEntry>() * list_of_files.len() * 1024
                )
            })?;

        // We expect file sizes between 4KiB and 4MiB. Pre-allocate 16KiB to reduce
        // reallocations a bit.
        let mut buf = Vec::new();
        buf.try_reserve(16_384)
            .map_err(|_| err!(no_mem_error(), "Couldn't allocate for {} memory", 16_384))?;

        for item in list_of_files.each() {
            let params = RArray::from_value(item?).expect("file list was not a Ruby Array");
            if params.len() != 4 {
                return Err(err!(
                    arg_error(),
                    "wrong number of arguments {} instead of 4",
                    params.len()
                ));
            }

            let params = params.to_value_array::<4>()?;

            let mut file_info = FileInfo::open_from_params(&params)?;
            file_info.read_from_file(&mut buf)?;
            self.process_buffer(file_info, &buf)?;
        }
        Ok(())
    }

    /// Consume the `EntryMap` and convert the key/value into `FileEntry`
    /// objects, sorting them by their JSON strings.
    pub fn into_sorted(self) -> Result<Vec<FileEntry>> {
        let mut sorted = Vec::new();

        // To match the behavior of the C version, pre-allocate the entries
        // and check for allocation failure. Generally idiomatic Rust would
        // `collect` the iterator into a new `Vec` in place, but this panics
        // if it can't allocate and we want to continue execution in that
        // scenario.
        if sorted.try_reserve_exact(self.0.len()).is_err() {
            return Err(MmapError::OutOfMemory(
                self.0.len() * size_of::<FileEntry>(),
            ));
        }

        sorted.extend(
            self.0
                .into_iter()
                .map(|(data, meta)| FileEntry { data, meta }),
        );
        sorted.sort_unstable_by(|x, y| x.data.cmp(&y.data));

        Ok(sorted)
    }

    /// Check if the `EntryMap` already contains the JSON string.
    /// If yes, update the associated value, if not insert the
    /// entry into the map.
    pub fn merge_or_store(&mut self, data: BorrowedData, meta: EntryMetadata) -> Result<()> {
        // Manually hash the `BorrowedData` and perform an equality check on the
        // key. This allows us to perform the comparison without allocating a
        // new `EntryData` that may not be needed.
        let mut state = self.0.hasher().build_hasher();
        data.hash(&mut state);
        let hash = state.finish();

        match self.0.raw_entry_mut().from_hash(hash, |k| k == &data) {
            RawEntryMut::Vacant(entry) => {
                // Allocate a new `EntryData` as the JSON/pid combination is
                // not present in the map.
                let owned = EntryData::try_from(data)?;
                entry.insert(owned, meta);
            }
            RawEntryMut::Occupied(mut entry) => {
                let existing = entry.get_mut();
                existing.merge(&meta);
            }
        }

        Ok(())
    }

    /// Parse metrics data from a `.db` file and store in the `EntryMap`.
    fn process_buffer(&mut self, file_info: FileInfo, source: &[u8]) -> Result<()> {
        if source.len() < HEADER_SIZE {
            // Nothing to read, OK.
            return Ok(());
        }

        // CAST: no-op on 32-bit, widening on 64-bit.
        let used = read_u32(source, 0)? as usize;

        if used > source.len() {
            return Err(MmapError::PromParsing(format!(
                "source file {} corrupted, used {used} > file size {}",
                file_info.path.display(),
                source.len()
            )));
        }

        let mut pos = HEADER_SIZE;

        while pos + size_of::<u32>() < used {
            let raw_entry = RawEntry::from_slice(&source[pos..used])?;

            if pos + raw_entry.total_len() > used {
                return Err(MmapError::PromParsing(format!(
                    "source file {} corrupted, used {used} < stored data length {}",
                    file_info.path.display(),
                    pos + raw_entry.total_len()
                )));
            }

            let meta = EntryMetadata::new(&raw_entry, &file_info)?;
            let data = BorrowedData::new(&raw_entry, &file_info, meta.is_pid_significant())?;

            self.merge_or_store(data, meta)?;

            pos += raw_entry.total_len();
        }

        Ok(())
    }
}

#[cfg(test)]
mod test {
    use magnus::{StaticSymbol, Symbol};
    use std::mem;

    use super::*;
    use crate::file_entry::FileEntry;
    use crate::testhelper::{self, TestFile};

    impl EntryData {
        /// A helper function for tests to convert owned data to references.
fn as_borrowed(&self) -> BorrowedData { BorrowedData { json: &self.json, pid: self.pid.as_deref(), } } } #[test] fn test_into_sorted() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let entries = vec![ FileEntry { data: EntryData { json: "zzzzzz".to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("max"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }, FileEntry { data: EntryData { json: "zzz".to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("max"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }, FileEntry { data: EntryData { json: "zzzaaa".to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("max"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }, FileEntry { data: EntryData { json: "aaa".to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("max"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }, FileEntry { data: EntryData { json: "ooo".to_string(), pid: Some("worker-1_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("all"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }, FileEntry { data: EntryData { json: "ooo".to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("all"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }, ]; let mut map = EntryMap::new(); for entry in entries { map.0.insert(entry.data, entry.meta); } let result = map.into_sorted(); assert!(result.is_ok()); let sorted = result.unwrap(); assert_eq!(sorted.len(), 6); assert_eq!(sorted[0].data.json, "aaa"); assert_eq!(sorted[1].data.json, "ooo"); assert_eq!(sorted[1].data.pid.as_deref(), Some("worker-0_0")); assert_eq!(sorted[2].data.json, "ooo"); assert_eq!(sorted[2].data.pid.as_deref(), Some("worker-1_0")); assert_eq!(sorted[3].data.json, "zzz"); assert_eq!(sorted[4].data.json, "zzzaaa"); assert_eq!(sorted[5].data.json, "zzzzzz"); } #[test] fn test_merge_or_store() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let key = "foobar"; let starting_entry = FileEntry { data: EntryData { json: key.to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("all"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }; let matching_entry = FileEntry { data: EntryData { json: key.to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("all"), type_: StaticSymbol::new("gauge"), value: 5.0, }, }; let same_key_different_worker = FileEntry { data: EntryData { json: key.to_string(), pid: Some("worker-1_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("all"), type_: StaticSymbol::new("gauge"), value: 100.0, }, }; let unmatched_entry = FileEntry { data: EntryData { json: "another key".to_string(), pid: Some("worker-0_0".to_string()), }, meta: EntryMetadata { multiprocess_mode: Symbol::new("all"), type_: StaticSymbol::new("gauge"), value: 1.0, }, }; let mut map = EntryMap::new(); map.0 .insert(starting_entry.data.clone(), starting_entry.meta.clone()); let matching_borrowed = matching_entry.data.as_borrowed(); map.merge_or_store(matching_borrowed, matching_entry.meta) .unwrap(); assert_eq!( 5.0, map.0.get(&starting_entry.data).unwrap().value, "value updated" ); 
assert_eq!(1, map.0.len(), "no entry added"); let same_key_different_worker_borrowed = same_key_different_worker.data.as_borrowed(); map.merge_or_store( same_key_different_worker_borrowed, same_key_different_worker.meta, ) .unwrap(); assert_eq!( 5.0, map.0.get(&starting_entry.data).unwrap().value, "value unchanged" ); assert_eq!(2, map.0.len(), "additional entry added"); let unmatched_entry_borrowed = unmatched_entry.data.as_borrowed(); map.merge_or_store(unmatched_entry_borrowed, unmatched_entry.meta) .unwrap(); assert_eq!( 5.0, map.0.get(&starting_entry.data).unwrap().value, "value unchanged" ); assert_eq!(3, map.0.len(), "entry added"); } #[test] fn test_process_buffer() { struct TestCase { name: &'static str, json: &'static [&'static str], values: &'static [f64], used: Option, expected_ct: usize, expected_err: Option, } let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let tc = vec![ TestCase { name: "single entry", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0], used: None, expected_ct: 1, expected_err: None, }, TestCase { name: "multiple entries", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, r#"["second_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0, 2.0], used: None, expected_ct: 2, expected_err: None, }, TestCase { name: "empty", json: &[], values: &[], used: None, expected_ct: 0, expected_err: None, }, TestCase { name: "used too long", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0], used: Some(9999), expected_ct: 0, expected_err: Some(MmapError::PromParsing(String::new())), }, TestCase { name: "used too short", json: &[ r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#, ], values: &[1.0], used: Some(15), expected_ct: 0, expected_err: Some(MmapError::out_of_bounds(88, 7)), }, ]; for case in tc { let name = case.name; let input_bytes = testhelper::entries_to_db(case.json, case.values, case.used); let TestFile { file, path, dir: _dir, } = TestFile::new(&input_bytes); let info = FileInfo { file, path, len: case.json.len(), multiprocess_mode: Symbol::new("max"), type_: StaticSymbol::new("gauge"), pid: "worker-1".to_string(), }; let mut map = EntryMap::new(); let result = map.process_buffer(info, &input_bytes); assert_eq!(case.expected_ct, map.0.len(), "test case: {name} - count"); if let Some(expected_err) = case.expected_err { // Validate we have the right enum type for the error. Error // messages contain the temp dir path and can't be predicted // exactly. 
                assert_eq!(
                    mem::discriminant(&expected_err),
                    mem::discriminant(&result.unwrap_err()),
                    "test case: {name} - failure"
                );
            } else {
                assert_eq!(Ok(()), result, "test case: {name} - success");

                assert_eq!(
                    case.json.len(),
                    map.0.len(),
                    "test case: {name} - all entries captured"
                );
            }
        }
    }
}
prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/mmap.rs000066400000000000000000000622141442423076500252500ustar00rootroot00000000000000use magnus::exception::*;
use magnus::prelude::*;
use magnus::rb_sys::{AsRawValue, FromRawValue};
use magnus::typed_data::Obj;
use magnus::value::Fixnum;
use magnus::{block::Proc, eval, scan_args, Error, Integer, RArray, RClass, RHash, RString, Value};
use nix::libc::c_long;
use rb_sys::rb_str_new_static;
use std::fs::File;
use std::io::{prelude::*, SeekFrom};
use std::mem;
use std::path::Path;
use std::ptr::NonNull;
use std::sync::RwLock;

use crate::err;
use crate::error::MmapError;
use crate::file_entry::FileEntry;
use crate::map::EntryMap;
use crate::raw_entry::RawEntry;
use crate::util::{self, CheckedOps};
use crate::Result;
use crate::HEADER_SIZE;

use inner::InnerMmap;

mod inner;

/// A Rust struct wrapped in a Ruby object, providing access to a memory-mapped
/// file used to store, update, and read out Prometheus metrics.
///
/// - File format:
///   - Header:
///     - 4 bytes: u32 - total size of metrics in file.
///     - 4 bytes: NUL byte padding.
///   - Repeating metrics entries:
///     - 4 bytes: u32 - entry JSON string size.
///     - `N` bytes: UTF-8 encoded JSON string used as entry key.
///     - (8 - (4 + `N`) % 8) bytes: 1 to 8 padding space (0x20) bytes to
///       reach 8-byte alignment.
///     - 8 bytes: f64 - entry value.
///
/// All numbers are saved in native-endian format.
///
/// Generated via [luismartingarcia/protocol](https://github.com/luismartingarcia/protocol):
///
/// ```
/// protocol "Used:4,Pad:4,K1 Size:4,K1 Name:4,K1 Value:8,K2 Size:4,K2 Name:4,K2 Value:8"
///
///  0                   1                   2                   3
///  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// | Used  |  Pad  |K1 Size|K1 Name|   K1 Value    |K2 Size|K2 Name|
/// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/// |   K2 Value    |
/// +-+-+-+-+-+-+-+-+
/// ```
//
// The API imposed by `magnus` requires all methods to use shared borrows.
// This means we can't store any mutable state in the top-level struct,
// and must store the interior data behind a `RwLock`, which adds run-time
// checks that mutable operations have no concurrent reads or writes.
//
// We are further limited by the need to support subclassing in Ruby, which
// requires us to define an allocation function for the class, the
// `magnus::class::define_alloc_func()` function. This requires the type to
// support the `Default` trait, so a `File` cannot be directly held by the
// object being constructed. Having the `RwLock` hold an `Option` of the
// interior object resolves this.
#[derive(Debug, Default)]
#[magnus::wrap(class = "FastMmapedFileRs", free_immediately, size)]
pub struct MmapedFile(RwLock<Option<InnerMmap>>);
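// Illustrative addition, not part of the original source: the entry layout
// described above, worked through in code. `entry_len` is a hypothetical
// helper that mirrors the documented format; the crate's real arithmetic
// lives in `RawEntry::calc_total_len`.
#[cfg(test)]
mod layout_example {
    /// Total on-disk size of one entry for a key of `key_len` bytes:
    /// 4-byte length prefix + key + 1..=8 space-padding bytes to reach
    /// 8-byte alignment + 8-byte f64 value.
    fn entry_len(key_len: usize) -> usize {
        let padding = 8 - (4 + key_len) % 8;
        4 + key_len + padding + 8
    }

    #[test]
    fn one_byte_key() {
        // "a": 4 (len) + 1 (key) + 3 (pad) + 8 (value) = 16 bytes, matching
        // the 16-byte entries assumed by the tests further below.
        assert_eq!(16, entry_len(1));
    }
}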
impl MmapedFile {
    /// call-seq:
    ///   new(file)
    ///
    /// create a new Mmap object
    ///
    /// * file
    ///
    ///   Creates a mapping that's shared with all other processes
    ///   mapping the same area of the file.
    pub fn new(klass: RClass, args: &[Value]) -> magnus::error::Result<Obj<Self>> {
        let args = scan_args::scan_args::<(RString,), (), (), (), (), ()>(args)?;
        let path = args.required.0;

        let lock = MmapedFile(RwLock::new(None));
        let obj = Obj::wrap_as(lock, klass);

        let _: Value = obj.funcall("initialize", (path,))?;

        Ok(obj)
    }

    /// Initialize a new `FastMmapedFileRs` object. This must be defined in
    /// order for inheritance to work.
    pub fn initialize(rb_self: Obj<Self>, fname: String) -> magnus::error::Result<()> {
        let file = File::options()
            .read(true)
            .write(true)
            .open(&fname)
            .map_err(|_| err!(arg_error(), "Can't open {}", fname))?;

        let inner = InnerMmap::new(fname.into(), file)?;
        rb_self.insert_inner(inner)?;

        let weak_klass = RClass::from_value(eval("ObjectSpace::WeakMap")?)
            .ok_or_else(|| err!(no_method_error(), "unable to create WeakMap"))?;
        let weak_obj_tracker = weak_klass.new_instance(())?;

        // We will need to iterate over strings backed by the mmapped file, but
        // don't want to prevent the GC from reaping them when the Ruby code
        // has finished with them. `ObjectSpace::WeakMap` allows us to track
        // them without extending their lifetime.
        //
        // https://ruby-doc.org/core-3.0.0/ObjectSpace/WeakMap.html
        rb_self.ivar_set("@weak_obj_tracker", weak_obj_tracker)?;

        Ok(())
    }

    /// Read the list of files provided from Ruby and convert them to a Prometheus
    /// metrics String.
    pub fn to_metrics(file_list: RArray) -> magnus::error::Result<String> {
        let mut map = EntryMap::new();
        map.aggregate_files(file_list)?;

        let sorted = map.into_sorted()?;

        FileEntry::entries_to_string(sorted).map_err(|e| e.into())
    }

    /// Document-method: []
    /// Document-method: slice
    ///
    /// call-seq: [](args)
    ///
    /// Element reference - with the following syntax:
    ///
    ///   self[nth]
    ///
    /// retrieve the nth character
    ///
    ///   self[start..last]
    ///
    /// return a substring from start to last
    ///
    ///   self[start, length]
    ///
    /// return a substring of length characters from start
    pub fn slice(rb_self: Obj<Self>, args: &[Value]) -> magnus::error::Result<Value> {
        // The C implementation would trigger a GC cycle via `rb_gc_force_recycle`
        // if the `MM_PROTECT` flag is set, but in practice this is never used.
        // We omit this logic, particularly because `rb_gc_force_recycle` is a
        // no-op as of Ruby 3.1.
        let str = (*rb_self).str(rb_self)?;
        let res = str.funcall("[]", args);

        // The C implementation does this, perhaps to validate that the len we
        // provided is actually being used.
        (*rb_self).inner_mut(|inner| {
            inner.set_len(str.len());
            Ok(())
        })?;

        res
    }
    /// Document-method: msync
    /// Document-method: sync
    /// Document-method: flush
    ///
    /// call-seq: msync
    ///
    /// flush the file
    pub fn sync(&self, args: &[Value]) -> magnus::error::Result<()> {
        use nix::sys::mman::MsFlags;

        let mut ms_async = false;
        let args = scan_args::scan_args::<(), (Option<i32>,), (), (), (), ()>(args)?;

        if let Some(flag) = args.optional.0 {
            let flag = MsFlags::from_bits(flag).unwrap_or(MsFlags::empty());
            ms_async = flag.contains(MsFlags::MS_ASYNC);
        }

        // The `memmap2` crate does not support the `MS_INVALIDATE` flag. We ignore that
        // flag if passed in, checking only for `MS_ASYNC`. In practice no arguments are ever
        // passed to this function, but we do this to maintain compatibility with the
        // C implementation.
        self.inner_mut(|inner| inner.flush(ms_async))
            .map_err(|e| e.into())
    }

    /// Document-method: munmap
    /// Document-method: unmap
    ///
    /// call-seq: munmap
    ///
    /// terminate the association
    pub fn munmap(rb_self: Obj<Self>) -> magnus::error::Result<()> {
        let rs_self = &*rb_self;

        rs_self.inner_mut(|inner| {
            // truncate file to actual used size
            inner.truncate_file()?;

            // We are about to release the backing mmap for Ruby's String
            // objects. If Ruby attempts to read from them the program will
            // segfault. We update the length of all Strings to zero so Ruby
            // does not attempt to access the now invalid address between now
            // and when GC eventually reaps the objects.
            //
            // See the following for more detail:
            //   https://gitlab.com/gitlab-org/ruby/gems/prometheus-client-mmap/-/issues/39
            //   https://gitlab.com/gitlab-org/ruby/gems/prometheus-client-mmap/-/issues/41
            //   https://gitlab.com/gitlab-org/ruby/gems/prometheus-client-mmap/-/merge_requests/80
            inner.set_len(0);
            Ok(())
        })?;

        // Update each String object to be zero-length.
        rs_self.update_weak_map(rb_self)?;

        // Remove the `InnerMmap` from the `RwLock`. This will drop at the
        // end of this function, unmapping and closing the file.
        let _ = rs_self.take_inner()?;
        Ok(())
    }

    /// Fetch the `used` header from the `.db` file, the length
    /// in bytes of the data written to the file.
    pub fn load_used(&self) -> magnus::error::Result<Integer> {
        let used = self.inner(|inner| inner.load_used())?;

        Ok(Integer::from_u64(used as u64))
    }

    /// Update the `used` header for the `.db` file, the length
    /// in bytes of the data written to the file.
    pub fn save_used(rb_self: Obj<Self>, used: Fixnum) -> magnus::error::Result<Fixnum> {
        let rs_self = &*rb_self;
        let used_uint = used.to_u32()?;

        // If the underlying mmap is smaller than the header, then resize to fit.
        // The file has already been expanded to page size when first opened, so
        // even if the map is less than HEADER_SIZE, we're not at risk of a
        // SIGBUS.
        if rs_self.capacity() < HEADER_SIZE {
            rs_self.expand_to_fit(rb_self, HEADER_SIZE)?;
        }

        rs_self.inner_mut(|inner| inner.save_used(used_uint))?;

        Ok(used)
    }

    /// Fetch the value associated with a key from the mmap.
    /// If no entry is present, initialize with the default
    /// value provided.
    pub fn fetch_entry(
        rb_self: Obj<Self>,
        positions: RHash,
        key: RString,
        default_value: f64,
    ) -> magnus::error::Result<f64> {
        let rs_self = &*rb_self;
        let position: Option<Fixnum> = positions.lookup(key)?;

        if let Some(pos) = position {
            let pos = pos.to_usize()?;
            return rs_self
                .inner(|inner| inner.load_value(pos))
                .map_err(|e| e.into());
        }

        rs_self.check_expand(rb_self, key.len())?;

        let value_offset: usize = rs_self.inner_mut(|inner| {
            // SAFETY: We must not call any Ruby code for the lifetime of this borrow.
            unsafe { inner.initialize_entry(key.as_slice(), default_value) }
        })?;

        // CAST: no-op on 64-bit, widening on 32-bit.
        positions.aset(key, Integer::from_u64(value_offset as u64))?;

        rs_self.load_value(value_offset)
    }
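    // Note (added commentary, not part of the original source): `positions` is
    // a Ruby Hash acting as a per-key offset cache. The first fetch/upsert for
    // a key falls through to `initialize_entry` and records the byte offset of
    // the f64 value; subsequent calls for the same key take the
    // `positions.lookup(key)` fast path above and read or write only those
    // eight bytes, never rescanning the file.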
    /// Update the value of an existing entry, if present. Otherwise create a new entry
    /// for the key.
    pub fn upsert_entry(
        rb_self: Obj<Self>,
        positions: RHash,
        key: RString,
        value: f64,
    ) -> magnus::error::Result<f64> {
        let rs_self = &*rb_self;
        let position: Option<Fixnum> = positions.lookup(key)?;

        if let Some(pos) = position {
            let pos = pos.to_usize()?;
            return rs_self
                .inner_mut(|inner| {
                    inner.save_value(pos, value)?;

                    // TODO just return `value` here instead of loading it?
                    // This is how the C implementation did it, but I don't
                    // see what the extra load gains us.
                    inner.load_value(pos)
                })
                .map_err(|e| e.into());
        }

        rs_self.check_expand(rb_self, key.len())?;

        let value_offset: usize = rs_self.inner_mut(|inner| {
            // SAFETY: We must not call any Ruby code for the lifetime of this borrow.
            unsafe { inner.initialize_entry(key.as_slice(), value) }
        })?;

        // CAST: no-op on 64-bit, widening on 32-bit.
        positions.aset(key, Integer::from_u64(value_offset as u64))?;

        rs_self.load_value(value_offset)
    }

    /// Creates a Ruby String containing the section of the mmapped file that
    /// has been written to.
    fn str(&self, rb_self: Obj<Self>) -> magnus::error::Result<RString> {
        let val_id = (*rb_self).inner(|inner| {
            let ptr = inner.as_ptr();
            let len = inner.len();

            // SAFETY: This is safe so long as the data provided to Ruby meets its
            // requirements. When unmapping the file this will no longer be the
            // case, see the comment on `munmap` for how we handle this.
            Ok(unsafe { rb_str_new_static(ptr as _, len as _) })
        })?;

        let val = unsafe { Value::from_raw(val_id) };

        // UNWRAP: We created this value as a string above.
        let str = RString::from_value(val).unwrap();

        let tracker: Value = rb_self.ivar_get("@weak_obj_tracker")?;

        // Use the string's Id as the key in the `WeakMap`.
        let key = str.as_raw();
        let _: Value = tracker.funcall("[]=", (key, str))?;

        Ok(str)
    }

    /// If we reallocate, any live Ruby strings provided by the `str()` method
    /// will be invalidated. We need to iterate over them and update their
    /// heap pointers to the newly allocated memory region.
    fn update_weak_map(&self, rb_self: Obj<Self>) -> magnus::error::Result<()> {
        let tracker: Value = rb_self.ivar_get("@weak_obj_tracker")?;

        let (new_ptr, new_len) = (*rb_self).inner(|inner| {
            // Pointers are not `Send`, convert it to usize to allow capture in closure.
            let new_ptr = inner.as_ptr() as usize;
            let new_len = util::cast_chk::<_, c_long>(inner.len(), "mmap len")?;
            Ok((new_ptr, new_len))
        })?;

        // Allocate a block with a closure that updates the string details.
        let block = Proc::from_fn(move |args, _block| {
            let val = args
                .get(0)
                .ok_or_else(|| err!(arg_error(), "no argument received from `each_value`"))?;

            let str = RString::from_value(*val)
                .ok_or_else(|| err!(arg_error(), "weakmap value was not a string"))?;

            // SAFETY: We're messing with Ruby's internals here, YOLO.
            unsafe {
                // Convert the magnus wrapper type to a raw string exposed by `rb_sys`,
                // which provides access to its internals.
                let mut raw_str = Self::rb_string_internal(str);

                // Update the string to point to the new mmapped file.
                // We're matching the behavior of Ruby's `str_new_static` function.
                // See https://github.com/ruby/ruby/blob/e51014f9c05aa65cbf203442d37fef7c12390015/string.c#L1030-L1053
                raw_str.as_mut().as_.heap.ptr = new_ptr as _;
                raw_str.as_mut().as_.heap.len = new_len;
                raw_str.as_mut().as_.heap.aux.capa = new_len;
            }

            Ok(())
        });

        // Execute the block.
        let _: Value = tracker.funcall_with_block("each_value", (), block)?;

        Ok(())
    }
    /// Check that the mmap is large enough to contain the value to be added,
    /// and expand it to fit if necessary.
    fn check_expand(&self, rb_self: Obj<Self>, key_len: usize) -> magnus::error::Result<()> {
        // CAST: no-op on 32-bit, widening on 64-bit.
        let used = self.inner(|inner| inner.load_used())? as usize;
        let entry_len = RawEntry::calc_total_len(key_len)?;

        // We need the mmapped region to contain at least one byte beyond the
        // written data to create a NUL-terminated C string. Validate that the
        // new length does not exactly match or exceed the length of the mmap.
        while self.capacity() <= used.add_chk(entry_len)? {
            self.expand_to_fit(rb_self, self.capacity().mul_chk(2)?)?;
        }

        Ok(())
    }

    /// Expand the underlying file until it is long enough to fit `target_cap`.
    /// This will remove the existing mmap, expand the file, then update any
    /// strings held by the `WeakMap` to point to the newly mmapped address.
    fn expand_to_fit(&self, rb_self: Obj<Self>, target_cap: usize) -> magnus::error::Result<()> {
        if target_cap < self.capacity() {
            return Err(err!(arg_error(), "Can't reduce the size of mmap"));
        }

        let mut new_cap = self.capacity();
        while new_cap < target_cap {
            new_cap = new_cap.mul_chk(2)?;
        }

        if new_cap != self.capacity() {
            // Drop the old mmap.
            let (mut file, path) = self.take_inner()?.munmap();

            self.expand_file(&mut file, &path, target_cap)?;

            // Re-mmap the expanded file.
            let new_inner = InnerMmap::reestablish(path, file, target_cap)?;

            self.insert_inner(new_inner)?;

            return self.update_weak_map(rb_self);
        }

        Ok(())
    }

    /// Use lseek(2) to seek past the end of the file and write a NUL byte. This
    /// creates a file hole that expands the size of the file without consuming
    /// disk space until it is actually written to.
    fn expand_file(&self, file: &mut File, path: &Path, len: usize) -> Result<()> {
        if len == 0 {
            return Err(MmapError::overflowed(0, -1, "adding"));
        }

        // CAST: no-op on 64-bit, widening on 32-bit.
        let len = len as u64;

        match file.seek(SeekFrom::Start(len - 1)) {
            Ok(_) => {}
            Err(_) => {
                return Err(MmapError::WithErrno(format!("Can't lseek {}", len - 1)));
            }
        }

        match file.write(&[0x0]) {
            Ok(1) => {}
            _ => {
                return Err(MmapError::WithErrno(format!(
                    "Can't extend {}",
                    path.display()
                )));
            }
        }

        Ok(())
    }

    /// The total capacity of the underlying mmap.
    #[inline]
    fn capacity(&self) -> usize {
        // UNWRAP: This is actually infallible, but we need to
        // wrap it in a `Result` for use with `inner()`.
        self.inner(|inner| Ok(inner.capacity())).unwrap()
    }

    fn load_value(&self, position: usize) -> magnus::error::Result<f64> {
        self.inner(|inner| inner.load_value(position))
            .map_err(|e| e.into())
    }

    /// Takes a closure with immutable access to InnerMmap. Will fail if the inner
    /// object has a mutable borrow or has been dropped.
    fn inner<F, T>(&self, func: F) -> Result<T>
    where
        F: FnOnce(&InnerMmap) -> Result<T>,
    {
        let inner_opt = self.0.try_read().map_err(|_| MmapError::ConcurrentAccess)?;

        let inner = inner_opt.as_ref().ok_or(MmapError::UnmappedFile)?;

        func(inner)
    }

    /// Takes a closure with mutable access to InnerMmap. Will fail if the inner
    /// object has an existing mutable borrow, or has been dropped.
    fn inner_mut<F, T>(&self, func: F) -> Result<T>
    where
        F: FnOnce(&mut InnerMmap) -> Result<T>,
    {
        let mut inner_opt = self
            .0
            .try_write()
            .map_err(|_| MmapError::ConcurrentAccess)?;

        let inner = inner_opt.as_mut().ok_or(MmapError::UnmappedFile)?;

        func(inner)
    }

    /// Take ownership of the `InnerMmap` from the `RwLock`.
    /// Will fail if a mutable borrow is already held or the inner
    /// object has been dropped.
    fn take_inner(&self) -> Result<InnerMmap> {
        let mut inner_opt = self
            .0
            .try_write()
            .map_err(|_| MmapError::ConcurrentAccess)?;

        match (*inner_opt).take() {
            Some(i) => Ok(i),
            None => Err(MmapError::UnmappedFile),
        }
    }

    /// Move `new_inner` into the `RwLock`.
    /// Will return an error if a mutable borrow is already held.
    fn insert_inner(&self, new_inner: InnerMmap) -> Result<()> {
        let mut inner_opt = self
            .0
            .try_write()
            .map_err(|_| MmapError::ConcurrentAccess)?;

        (*inner_opt).replace(new_inner);

        Ok(())
    }

    /// Convert `magnus::RString` into the raw binding used by `rb_sys::RString`.
/// We need this to manually change the pointer and length values for strings /// when moving the mmap to a new file. unsafe fn rb_string_internal(rb_str: RString) -> NonNull { mem::transmute::>(rb_str) } } #[cfg(test)] mod test { use magnus::error::Error; use magnus::eval; use magnus::Range; use nix::unistd::{sysconf, SysconfVar}; use std::mem::size_of; use super::*; use crate::raw_entry::RawEntry; use crate::testhelper::TestFile; /// Create a wrapped MmapedFile object. fn create_obj() -> Obj { let TestFile { file: _file, path, dir: _dir, } = TestFile::new(&[0u8; 8]); let path_str = path.display().to_string(); let rpath = RString::new(&path_str); eval!("FastMmapedFileRs.new(path)", path = rpath).unwrap() } /// Add three entries to the mmap. Expected length is 56, 3x 16-byte /// entries with 8-byte header. fn populate_entries(rb_self: &Obj) -> RHash { let positions = RHash::from_value(eval("{}").unwrap()).unwrap(); MmapedFile::upsert_entry(*rb_self, positions, RString::new("a"), 0.0).unwrap(); MmapedFile::upsert_entry(*rb_self, positions, RString::new("b"), 1.0).unwrap(); MmapedFile::upsert_entry(*rb_self, positions, RString::new("c"), 2.0).unwrap(); positions } #[test] fn test_new() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let TestFile { file, path, dir: _dir, } = TestFile::new(&[0u8; 8]); let path_str = path.display().to_string(); let rpath = RString::new(&path_str); // Object created successfully let result: std::result::Result, Error> = eval!("FastMmapedFileRs.new(path)", path = rpath); assert!(result.is_ok()); // Weak map added let obj = result.unwrap(); let weak_tracker: Value = obj.ivar_get("@weak_obj_tracker").unwrap(); assert_eq!("ObjectSpace::WeakMap", weak_tracker.class().inspect()); // File expanded to page size let page_size = sysconf(SysconfVar::PAGE_SIZE).unwrap().unwrap() as u64; let stat = file.metadata().unwrap(); assert_eq!(page_size, stat.len()); // Used set to header size assert_eq!( HEADER_SIZE as u64, obj.load_used().unwrap().to_u64().unwrap() ); } #[test] fn test_slice() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let obj = create_obj(); let _ = populate_entries(&obj); // Validate header updated with new length let header_range = Range::new(0, HEADER_SIZE, true).unwrap().as_value(); let header_slice = MmapedFile::slice(obj, &[header_range]).unwrap(); assert_eq!([56, 0, 0, 0, 0, 0, 0, 0], unsafe { header_slice.as_slice() }); let value_range = Range::new(HEADER_SIZE, 24, true).unwrap().as_value(); let value_slice = MmapedFile::slice(obj, &[value_range]).unwrap(); // Validate string length assert_eq!(1u32.to_ne_bytes(), unsafe { &value_slice.as_slice()[0..4] }); // Validate string and padding assert_eq!("a ", unsafe { String::from_utf8_lossy(&value_slice.as_slice()[4..8]) }); // Validate value assert_eq!(0.0f64.to_ne_bytes(), unsafe { &value_slice.as_slice()[8..16] }); } #[test] fn test_slice_resize() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let obj = create_obj(); let _ = populate_entries(&obj); let rs_self = &*obj; let value_range = Range::new(HEADER_SIZE, 24, false).unwrap().as_value(); let value_slice = MmapedFile::slice(obj, &[value_range]).unwrap(); rs_self.expand_to_fit(obj, rs_self.capacity() * 2).unwrap(); // If we haven't updated the pointer to the newly remapped file this will segfault. 
let _: Value = eval!("puts str", str = value_slice).unwrap(); } #[test] fn test_dont_fill_mmap() { let _cleanup = unsafe { magnus::embed::init() }; let ruby = magnus::Ruby::get().unwrap(); crate::init(&ruby).unwrap(); let obj = create_obj(); let positions = populate_entries(&obj); let rs_self = &*obj; rs_self.expand_to_fit(obj, 1024).unwrap(); let current_used = rs_self.inner(|inner| inner.load_used()).unwrap() as usize; let current_cap = rs_self.inner(|inner| Ok(inner.len())).unwrap(); // Create a new entry that exactly fills the capacity of the mmap. let val_len = current_cap - current_used - HEADER_SIZE - size_of::() - size_of::(); assert_eq!( current_cap, RawEntry::calc_total_len(val_len).unwrap() + current_used ); let str = String::from_utf8(vec![b'A'; val_len]).unwrap(); MmapedFile::upsert_entry(obj, positions, RString::new(&str), 1.0).unwrap(); // Validate that we have expanded the mmap, ensuring a trailing NUL. assert!(rs_self.capacity() > current_cap); } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/mmap/000077500000000000000000000000001442423076500246755ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/mmap/inner.rs000066400000000000000000000550671442423076500263730ustar00rootroot00000000000000use libc::off_t; use memmap2::{MmapMut, MmapOptions}; use nix::libc::c_long; use std::fs::File; use std::mem::size_of; use std::ops::Range; use std::os::unix::prelude::{AsRawFd, RawFd}; use std::path::PathBuf; use crate::error::{MmapError, RubyError}; use crate::raw_entry::RawEntry; use crate::util::CheckedOps; use crate::util::{self, errno, read_f64, read_u32}; use crate::Result; use crate::HEADER_SIZE; /// A mmapped file and its metadata. Ruby never directly interfaces /// with this struct. #[derive(Debug)] pub(super) struct InnerMmap { /// The handle of the file being mmapped. When resizing the /// file we must drop the `InnerMmap` while keeping this open, /// truncate/extend the file, and establish a new `InnerMmap` to /// re-map it. file: File, /// The path of the file. path: PathBuf, /// The mmap itself. When initializing a new entry the length of /// the mmap is used for bounds checking. map: MmapMut, /// The length of data written to the file, used to validate /// whether a `load/save_value` call is in bounds and the length /// we truncate the file to when unmapping. /// /// Equivalent to `i_mm->t->real` in the C implementation. len: usize, } impl InnerMmap { /// Constructs a new `InnerMmap`, mmapping `path`. /// Use when mmapping a file for the first time. When re-mapping a file /// after expanding it the `reestablish` function should be used. pub fn new(path: PathBuf, file: File) -> Result { let stat = file.metadata().map_err(|e| { MmapError::legacy( format!("Can't stat {}: {e}", path.display()), RubyError::Arg, ) })?; let file_size = util::cast_chk::<_, usize>(stat.len(), "file length")?; // We need to ensure the underlying file descriptor is at least a page size. // Otherwise, we could get a SIGBUS error if mmap() attempts to read or write // past the file. let reserve_size = Self::next_page_boundary(file_size)?; // Cast: no-op. Self::reserve_mmap_file_bytes(file.as_raw_fd(), reserve_size as off_t).map_err(|e| { MmapError::legacy( format!( "Can't reserve {reserve_size} bytes for memory-mapped file in {}: {e}", path.display() ), RubyError::Io, ) })?; // Ensure we always have space for the header. 
        let map_len = file_size.max(HEADER_SIZE);

        // SAFETY: There is the possibility of UB if the file is modified outside of
        // this program.
        let map = unsafe { MmapOptions::new().len(map_len).map_mut(&file) }.map_err(|e| {
            MmapError::legacy(format!("mmap failed ({}): {e}", errno()), RubyError::Arg)
        })?;

        let len = file_size;

        Ok(Self {
            file,
            path,
            map,
            len,
        })
    }

    /// Re-mmap a file that was previously mapped.
    pub fn reestablish(path: PathBuf, file: File, map_len: usize) -> Result<Self> {
        // SAFETY: There is the possibility of UB if the file is modified outside of
        // this program.
        let map = unsafe { MmapOptions::new().len(map_len).map_mut(&file) }.map_err(|e| {
            MmapError::legacy(format!("mmap failed ({}): {e}", errno()), RubyError::Arg)
        })?;

        // TODO should we keep this as the old len? We'd want to be able to truncate
        // to the old length at this point if closing the file. Matching C implementation
        // for now.
        let len = map_len;

        Ok(Self {
            file,
            path,
            map,
            len,
        })
    }

    /// Add a new metrics entry to the end of the mmap. This will fail if the mmap is at
    /// capacity. Callers must expand the file first.
    ///
    /// SAFETY: Must not call any Ruby code for the lifetime of `key`, otherwise we risk
    /// Ruby mutating the underlying `RString`.
    pub unsafe fn initialize_entry(&mut self, key: &[u8], value: f64) -> Result<usize> {
        // CAST: no-op on 32-bit, widening on 64-bit.
        let current_used = self.load_used()? as usize;
        let entry_length = RawEntry::calc_total_len(key.len())?;

        let new_used = current_used.add_chk(entry_length)?;

        // Increasing capacity requires expanding the file and re-mmapping it, we can't
        // perform this from `InnerMmap`.
        if self.capacity() < new_used {
            return Err(MmapError::Other(format!(
                "mmap capacity {} less than {}",
                self.capacity(),
                new_used
            )));
        }

        let bytes = self.map.as_mut();
        let value_offset = RawEntry::save(&mut bytes[current_used..new_used], key, value)?;

        // Won't overflow as value_offset is less than new_used.
        let position = current_used + value_offset;
        let new_used32 = util::cast_chk::<_, u32>(new_used, "used")?;

        self.save_used(new_used32)?;
        Ok(position)
    }

    /// Save a metrics value to an existing entry in the mmap.
    pub fn save_value(&mut self, offset: usize, value: f64) -> Result<()> {
        if self.len.add_chk(size_of::<f64>())? <= offset {
            return Err(MmapError::out_of_bounds(
                offset + size_of::<f64>(),
                self.len,
            ));
        }

        if offset < HEADER_SIZE {
            return Err(MmapError::Other(format!(
                "writing to offset {offset} would overwrite file header"
            )));
        }

        let value_bytes = value.to_ne_bytes();
        let value_range = self.item_range(offset, value_bytes.len())?;

        let bytes = self.map.as_mut();
        bytes[value_range].copy_from_slice(&value_bytes);

        Ok(())
    }

    /// Load a metrics value from an entry in the mmap.
    pub fn load_value(&self, offset: usize) -> Result<f64> {
        if self.len.add_chk(size_of::<f64>())? <= offset {
            return Err(MmapError::out_of_bounds(
                offset + size_of::<f64>(),
                self.len,
            ));
        }

        read_f64(self.map.as_ref(), offset)
    }

    /// The length of data written to the file.
    /// With a new file this is only set when Ruby calls `slice` on
    /// `FastMmapedFileRs`, so even if data has been written to the
    /// mmap attempts to read will fail until a String is created.
    /// When an existing file is read we set this value immediately.
    ///
    /// Equivalent to `i_mm->t->real` in the C implementation.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }
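    // Note (added commentary, not part of the original source): `len` and
    // `capacity` intentionally differ. `len` tracks the bytes the writer
    // considers valid (`i_mm->t->real` in the C code) and is what
    // `truncate_file` shrinks the file back to, while `capacity` below is the
    // full mmapped span (`i_mm->t->len`). For example, mapping an empty file
    // yields len == 0 but capacity == HEADER_SIZE, since the map is always
    // made at least large enough to hold the header.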
#[inline] pub fn capacity(&self) -> usize { self.map.len() } /// Update the length of the mmap considered to be written. pub fn set_len(&mut self, len: usize) { self.len = len; } /// Returns a raw pointer to the mmap. pub fn as_ptr(&self) -> *const u8 { self.map.as_ptr() } /// Perform an msync(2) on the mmap, flushing all changes written /// to disk. The sync may optionally be performed asynchronously. pub fn flush(&mut self, f_async: bool) -> Result<()> { if f_async { self.map .flush_async() .map_err(|_| MmapError::legacy(format!("msync({})", errno()), RubyError::Arg)) } else { self.map .flush() .map_err(|_| MmapError::legacy(format!("msync({})", errno()), RubyError::Arg)) } } /// Truncate the mmapped file to the end of the metrics data. pub fn truncate_file(&mut self) -> Result<()> { // CAST: no-op on 64-bit, widening on 32-bit. let trunc_len = self.len as u64; self.file .set_len(trunc_len) .map_err(|e| MmapError::legacy(format!("truncate: {e}"), RubyError::Type)) } /// Load the `used` header containing the size of the metrics data written. pub fn load_used(&self) -> Result { match read_u32(self.map.as_ref(), 0) { // CAST: we know HEADER_SIZE fits in a u32. Ok(0) => Ok(HEADER_SIZE as u32), u => u, } } /// Update the `used` header to the value provided. /// value provided. pub fn save_used(&mut self, used: u32) -> Result<()> { let bytes = self.map.as_mut(); bytes[..size_of::()].copy_from_slice(&used.to_ne_bytes()); Ok(()) } /// Drop self, which performs an munmap(2) on the mmap, /// returning the open `File` and `PathBuf` so the /// caller can expand the file and re-mmap it. pub fn munmap(self) -> (File, PathBuf) { (self.file, self.path) } // From https://stackoverflow.com/a/22820221: The difference with // ftruncate(2) is that (on file systems supporting it, e.g. Ext4) // disk space is indeed reserved by posix_fallocate but ftruncate // extends the file by adding holes (and without reserving disk // space). #[cfg(target_os = "linux")] fn reserve_mmap_file_bytes(fd: RawFd, len: off_t) -> nix::Result<()> { nix::fcntl::posix_fallocate(fd, 0, len) } // We simplify the reference implementation since we generally // don't need to reserve more than a page size. 
    #[cfg(not(target_os = "linux"))]
    fn reserve_mmap_file_bytes(fd: RawFd, len: off_t) -> nix::Result<()> {
        nix::unistd::ftruncate(fd, len)
    }

    fn item_range(&self, start: usize, len: usize) -> Result<Range<usize>> {
        let offset_end = start.add_chk(len)?;

        if offset_end >= self.capacity() {
            return Err(MmapError::out_of_bounds(offset_end, self.capacity()));
        }

        Ok(start..offset_end)
    }

    fn next_page_boundary(len: usize) -> Result<c_long> {
        use nix::unistd::{self, SysconfVar};

        let len = c_long::try_from(len)
            .map_err(|_| MmapError::failed_cast::<_, c_long>(len, "file len"))?;

        let mut page_size = match unistd::sysconf(SysconfVar::PAGE_SIZE) {
            Ok(Some(p)) if p > 0 => p,
            Ok(Some(p)) => {
                return Err(MmapError::legacy(
                    format!("Invalid page size {p}"),
                    RubyError::Io,
                ))
            }
            Ok(None) => {
                return Err(MmapError::legacy(
                    "No system page size found",
                    RubyError::Io,
                ))
            }
            Err(e) => {
                return Err(MmapError::legacy(
                    format!("Failed to get system page size: {e}"),
                    RubyError::Io,
                ))
            }
        };

        while page_size < len {
            page_size = page_size.mul_chk(2)?;
        }

        Ok(page_size)
    }
}

#[cfg(test)]
mod test {
    use nix::unistd::{self, SysconfVar};

    use super::*;
    use crate::testhelper::{self, TestEntry, TestFile};
    use crate::HEADER_SIZE;

    #[test]
    fn test_new() {
        struct TestCase {
            name: &'static str,
            existing: bool,
            expected_len: usize,
        }

        let page_size = unistd::sysconf(SysconfVar::PAGE_SIZE).unwrap().unwrap();

        let json = r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#;
        let value = 1.0;
        let entry_len = TestEntry::new(json, value).as_bytes().len();

        let tc = vec![
            TestCase {
                name: "empty file",
                existing: false,
                expected_len: 0,
            },
            TestCase {
                name: "existing file",
                existing: true,
                expected_len: HEADER_SIZE + entry_len,
            },
        ];

        for case in tc {
            let name = case.name;

            let data = match case.existing {
                true => testhelper::entries_to_db(&[json], &[1.0], None),
                false => Vec::new(),
            };

            let TestFile {
                file: original_file,
                path,
                dir: _dir,
            } = TestFile::new(&data);

            let original_stat = original_file.metadata().unwrap();

            let inner = InnerMmap::new(path.clone(), original_file).unwrap();

            let updated_file = File::open(&path).unwrap();
            let updated_stat = updated_file.metadata().unwrap();

            assert!(
                updated_stat.len() > original_stat.len(),
                "test case: {name} - file has been extended"
            );
            assert_eq!(
                updated_stat.len(),
                page_size as u64,
                "test case: {name} - file extended to page size"
            );
            assert_eq!(
                inner.capacity() as u64,
                original_stat.len().max(HEADER_SIZE as u64),
                "test case: {name} - mmap capacity matches original file len, unless smaller than HEADER_SIZE"
            );
            assert_eq!(
                case.expected_len,
                inner.len(),
                "test case: {name} - len set"
            );
        }
    }

    #[test]
    fn test_reestablish() {
        struct TestCase {
            name: &'static str,
            target_len: usize,
            expected_len: usize,
        }

        let json = r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#;

        let tc = vec![TestCase {
            name: "ok",
            target_len: 4096,
            expected_len: 4096,
        }];

        for case in tc {
            let name = case.name;

            let data = testhelper::entries_to_db(&[json], &[1.0], None);

            let TestFile {
                file: original_file,
                path,
                dir: _dir,
            } = TestFile::new(&data);

            let inner =
                InnerMmap::reestablish(path.clone(), original_file, case.target_len).unwrap();

            assert_eq!(
                case.target_len,
                inner.capacity(),
                "test case: {name} - mmap capacity set to target len",
            );
            assert_eq!(
                case.expected_len,
                inner.len(),
                "test case: {name} - len set"
            );
        }
    }

    #[test]
    fn test_initialize_entry() {
        struct TestCase {
            name: &'static str,
            empty: bool,
            used: Option<u32>,
            expected_used: Option<u32>,
            expected_value_offset: Option<usize>,
            expected_err: Option<MmapError>,
        }

        let json = r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#;
        let value = 1.0;
        let entry_len = TestEntry::new(json, value).as_bytes().len();

        let tc = vec![
            TestCase {
                name: "empty file, not expanded by outer mmap",
                empty: true,
                used: None,
                expected_used: None,
                expected_value_offset: None,
                expected_err: Some(MmapError::Other(format!(
                    "mmap capacity {HEADER_SIZE} less than {}",
                    entry_len + HEADER_SIZE,
                ))),
            },
            TestCase {
                name: "data in file",
                empty: false,
                used: None,
                expected_used: Some(HEADER_SIZE as u32 + (entry_len * 2) as u32),
                expected_value_offset: Some(176),
                expected_err: None,
            },
            TestCase {
                name: "data in file, invalid used larger than file",
                empty: false,
                used: Some(10_000),
                expected_used: None,
                expected_value_offset: None,
                expected_err: Some(MmapError::Other(format!(
                    "mmap capacity 4096 less than {}",
                    10_000 + entry_len
                ))),
            },
        ];

        for case in tc {
            let name = case.name;

            let data = match case.empty {
                true => Vec::new(),
                false => testhelper::entries_to_db(&[json], &[1.0], case.used),
            };

            let TestFile {
                file,
                path,
                dir: _dir,
            } = TestFile::new(&data);

            if !case.empty {
                // Ensure the file is large enough to have additional entries added.
                // Normally the outer mmap handles this.
                file.set_len(4096).unwrap();
            }

            let mut inner = InnerMmap::new(path, file).unwrap();

            let result = unsafe { inner.initialize_entry(json.as_bytes(), value) };

            if let Some(expected_used) = case.expected_used {
                assert_eq!(
                    expected_used,
                    inner.load_used().unwrap(),
                    "test case: {name} - used"
                );
            }

            if let Some(expected_value_offset) = case.expected_value_offset {
                assert_eq!(
                    expected_value_offset,
                    *result.as_ref().unwrap(),
                    "test case: {name} - value_offset"
                );
            }

            if let Some(expected_err) = case.expected_err {
                assert_eq!(
                    expected_err,
                    result.unwrap_err(),
                    "test case: {name} - error"
                );
            }
        }
    }

    #[test]
    fn test_save_value() {
        let json = r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#;
        let value = 1.0;
        let upper_bound = TestEntry::new(json, value).as_bytes().len() + HEADER_SIZE;
        let value_offset = upper_bound - size_of::<f64>();

        struct TestCase {
            name: &'static str,
            empty: bool,
            len: Option<usize>,
            offset: usize,
            expected_err: Option<MmapError>,
        }

        let tc = vec![
            TestCase {
                name: "existing file, in bounds",
                empty: false,
                len: None,
                offset: upper_bound - size_of::<f64>() - 1,
                expected_err: None,
            },
            TestCase {
                name: "existing file, out of bounds",
                empty: false,
                len: Some(100),
                offset: upper_bound * 2,
                expected_err: Some(MmapError::out_of_bounds(
                    upper_bound * 2 + size_of::<f64>(),
                    100,
                )),
            },
            TestCase {
                name: "existing file, off by one",
                empty: false,
                len: None,
                offset: value_offset + 1,
                expected_err: Some(MmapError::out_of_bounds(
                    value_offset + 1 + size_of::<f64>(),
                    upper_bound,
                )),
            },
            TestCase {
                name: "empty file cannot be saved to",
                empty: true,
                len: None,
                offset: 8,
                expected_err: Some(MmapError::out_of_bounds(8 + size_of::<f64>(), 0)),
            },
            TestCase {
                name: "overwrite header",
                empty: false,
                len: None,
                offset: 7,
                expected_err: Some(MmapError::Other(
                    "writing to offset 7 would overwrite file header".to_string(),
                )),
            },
        ];

        for case in tc {
            let name = case.name;

            let mut data = match case.empty {
                true => Vec::new(),
                false => testhelper::entries_to_db(&[json], &[1.0], None),
            };

            if let Some(len) = case.len {
                // Pad input to desired length.
data.append(&mut vec![0xff; len - upper_bound]); } let TestFile { file, path, dir: _dir, } = TestFile::new(&data); let mut inner = InnerMmap::new(path, file).unwrap(); let result = inner.save_value(case.offset, value); if let Some(expected_err) = case.expected_err { assert_eq!( expected_err, result.unwrap_err(), "test case: {name} - expected err" ); } else { assert!(result.is_ok(), "test case: {name} - success"); assert_eq!( value, util::read_f64(&inner.map, case.offset).unwrap(), "test case: {name} - value saved" ); } } } #[test] fn test_load_value() { let json = r#"["first_family","first_name",["label_a","label_b"],["value_a","value_b"]]"#; let value = 1.0; let total_len = TestEntry::new(json, value).as_bytes().len() + HEADER_SIZE; let value_offset = total_len - size_of::(); struct TestCase { name: &'static str, offset: usize, expected_err: Option, } let tc = vec![ TestCase { name: "in bounds", offset: value_offset, expected_err: None, }, TestCase { name: "out of bounds", offset: value_offset * 2, expected_err: Some(MmapError::out_of_bounds( value_offset * 2 + size_of::(), total_len, )), }, TestCase { name: "off by one", offset: value_offset + 1, expected_err: Some(MmapError::out_of_bounds( value_offset + 1 + size_of::(), total_len, )), }, ]; for case in tc { let name = case.name; let data = testhelper::entries_to_db(&[json], &[1.0], None); let TestFile { file, path, dir: _dir, } = TestFile::new(&data); let inner = InnerMmap::new(path, file).unwrap(); let result = inner.load_value(case.offset); if let Some(expected_err) = case.expected_err { assert_eq!( expected_err, result.unwrap_err(), "test case: {name} - expected err" ); } else { assert!(result.is_ok(), "test case: {name} - success"); assert_eq!(value, result.unwrap(), "test case: {name} - value loaded"); } } } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/parser.rs000066400000000000000000000263471442423076500256210ustar00rootroot00000000000000use smallvec::SmallVec; use std::str; /// String slices pointing to the fields of a borrowed `Entry`'s JSON data. #[derive(PartialEq, Debug)] pub struct MetricText<'a> { pub family_name: &'a str, pub metric_name: &'a str, pub labels: SmallVec<[&'a str; 4]>, pub values: SmallVec<[&'a str; 4]>, } #[derive(PartialEq, Debug)] struct MetricNames<'a> { label_json: &'a str, family_name: &'a str, metric_name: &'a str, } #[derive(PartialEq, Debug)] struct MetricLabelVals<'a> { labels: SmallVec<[&'a str; 4]>, values: SmallVec<[&'a str; 4]>, } /// Parse Prometheus metric data stored in the following format: /// /// ["metric","name",["label_a","label_b"],["value_a","value_b"]] /// /// There will be 1-8 trailing spaces to ensure at least a one-byte /// gap between the json and value, and to pad to an 8-byte alignment. /// We strip the surrounding double quotes from all items. Values may /// or may not have surrounding double quotes, depending on their type. pub fn parse_metrics(json: &str) -> Option { // It would be preferable to use `serde_json` here, but the values // may be strings, numbers, or null, and so don't parse easily to a // defined struct. Using `serde_json::Value` is an option, but since // we're just copying the literal values into a buffer this will be // inefficient and verbose. // Trim trailing spaces from string before processing. We use // `trim_end_matches()` instead of `trim_end()` because we know the // trailing bytes are always ASCII 0x20 bytes. `trim_end()` will also // check for unicode spaces and consume a few more CPU cycles. 
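    // For example (illustrative input): parsing
    //   ["http_requests","http_requests_total",["code"],["200"]]
    // followed by trailing pad spaces yields family_name "http_requests",
    // metric_name "http_requests_total", labels ["code"], values ["200"].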
    let trimmed = json.trim_end_matches(' ');

    let names = parse_names(trimmed)?;
    let label_json = names.label_json;

    let label_vals = parse_label_values(label_json)?;

    Some(MetricText {
        family_name: names.family_name,
        metric_name: names.metric_name,
        labels: label_vals.labels,
        values: label_vals.values,
    })
}

fn parse_names(json: &str) -> Option<MetricNames> {
    // Starting with: ["family_name","metric_name",[...
    if !json.starts_with("[\"") {
        return None;
    }

    // Now: family_name","metric_name",[...
    let remainder = json.get(2..)?;

    let names_end = remainder.find('[')?;

    // Save the rest of the slice to parse for labels later.
    let label_json = remainder.get(names_end..)?;

    // Now: family_name","metric_name",
    let remainder = remainder.get(..names_end)?;

    // Split on commas into:
    // family_name","metric_name",
    // ^^^^one^^^^^ ^^^^^two^^^^^
    let mut token_iter = remainder.split(',');

    // Captured: family_name","metric_name",
    //           ^^^^^^^^^^^
    let family_name = token_iter.next()?.trim_end_matches('"');

    // Captured: "family_name","metric_name",
    //                          ^^^^^^^^^^^
    let metric_name = token_iter.next()?.trim_matches('"');

    // Confirm the final entry of the iter is empty, i.e. the trailing ','.
    if !token_iter.next()?.is_empty() {
        return None;
    }

    Some(MetricNames {
        label_json,
        family_name,
        metric_name,
    })
}

fn parse_label_values(json: &str) -> Option<MetricLabelVals> {
    // Starting with: ["label_a","label_b"],["value_a", "value_b"]]
    if !(json.starts_with('[') && json.ends_with("]]")) {
        return None;
    }

    // Validate we either have the start of a label string or an
    // empty array, e.g. `["` or `[]`.
    if !matches!(json.as_bytes().get(1)?, b'"' | b']') {
        return None;
    }

    // Now: "label_a","label_b"
    let labels_end = json.find(']')?;
    let label_range = json.get(1..labels_end)?;

    let mut labels = SmallVec::new();

    // Split on commas into:
    // "label_a","label_b"
    // ^^^one^^^ ^^^two^^^
    for label in label_range.split(',') {
        // Captured: "label_a","label_b"
        //            ^^^^^^^
        // If there are no labels, e.g. `[][]`, then don't capture anything.
        if !label.is_empty() {
            labels.push(label.trim_matches('"'));
        }
    }

    // Now: ],["value_a", "value_b"]]
    let mut values_range = json.get(labels_end..)?;

    // Validate we have a separating comma with one and only one leading bracket.
    if !(values_range.starts_with("],[") && values_range.as_bytes().get(3)? != &b'[') {
        return None;
    }

    // Now: "value_a", "value_b"]]
    values_range = values_range.get(3..)?;

    let values_end = values_range.find(']')?;

    // Validate we have only two trailing brackets.
    if values_range.get(values_end..)?.len() > 2 {
        return None;
    }

    // Now: "value_a", "value_b"
    values_range = values_range.get(..values_end)?;

    let mut values = SmallVec::new();

    // Split on commas into:
    // "value_a","value_b"
    // ^^^one^^^ ^^^two^^^
    for value in values_range.split(',') {
        // Captured: "value_a","value_b"
        //            ^^^^^^^^^
        // If there are no values, e.g. `[][]`, then don't capture anything.
if !value.is_empty() { values.push(value.trim_matches('"')); } } if values.len() != labels.len() { return None; } Some(MetricLabelVals { labels, values }) } #[cfg(test)] mod test { use smallvec::smallvec; use super::*; struct TestCase { name: &'static str, input: &'static str, expected: Option>, } #[test] fn valid_json() { let tc = vec![ TestCase { name: "basic", input: r#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, expected: Some(MetricText { family_name: "metric", metric_name: "name", labels: smallvec!["label_a", "label_b"], values: smallvec!["value_a", "value_b"], }), }, TestCase { name: "many labels", input: r#"["metric","name",["label_a","label_b","label_c","label_d","label_e"],["value_a","value_b","value_c","value_d","value_e"]]"#, expected: Some(MetricText { family_name: "metric", metric_name: "name", labels: smallvec!["label_a", "label_b", "label_c", "label_d", "label_e"], values: smallvec!["value_a", "value_b", "value_c", "value_d", "value_e"], }), }, TestCase { name: "numeric value", input: r#"["metric","name",["label_a","label_b"],["value_a",403]]"#, expected: Some(MetricText { family_name: "metric", metric_name: "name", labels: smallvec!["label_a", "label_b"], values: smallvec!["value_a", "403"], }), }, TestCase { name: "null value", input: r#"["metric","name",["label_a","label_b"],[null,"value_b"]]"#, expected: Some(MetricText { family_name: "metric", metric_name: "name", labels: smallvec!["label_a", "label_b"], values: smallvec!["null", "value_b"], }), }, TestCase { name: "no labels", input: r#"["metric","name",[],[]]"#, expected: Some(MetricText { family_name: "metric", metric_name: "name", labels: smallvec![], values: smallvec![], }), }, ]; for case in tc { assert_eq!( parse_metrics(case.input), case.expected, "test case: {}", case.name, ); } } #[test] fn invalid_json() { let tc = vec![ TestCase { name: "not json", input: "hello, world", expected: None, }, TestCase { name: "no names", input: r#"[["label_a","label_b"],["value_a","value_b"]]"#, expected: None, }, TestCase { name: "too many names", input: r#"["metric","name","unexpected_name",["label_a","label_b"],["value_a","value_b"]]"#, expected: None, }, TestCase { name: "too many labels", input: r#"["metric","name","unexpected_name",["label_a","label_b","label_c"],["value_a","value_b"]]"#, expected: None, }, TestCase { name: "too many values", input: r#"["metric","name",["label_a","label_b"],["value_a","value_b",null]]"#, expected: None, }, TestCase { name: "no values", input: r#"["metric","name",["label_a","label_b"]"#, expected: None, }, TestCase { name: "no arrays", input: r#"["metric","name","label_a","value_a"]"#, expected: None, }, TestCase { name: "too many leading brackets", input: r#"[["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, expected: None, }, TestCase { name: "too many trailing brackets", input: r#"["metric","name",["label_a","label_b"],["value_a","value_b"]]]"#, expected: None, }, TestCase { name: "too many leading label brackets", input: r#"["metric","name",[["label_a","label_b"],["value_a","value_b"]]"#, expected: None, }, TestCase { name: "too many trailing label brackets", input: r#"["metric","name",["label_a","label_b"]],["value_a","value_b"]]"#, expected: None, }, TestCase { name: "too many leading value brackets", input: r#"["metric","name",["label_a","label_b"],[["value_a","value_b"]]"#, expected: None, }, TestCase { name: "comma in family name", input: r#"["met,ric","name",["label_a","label_b"],["value_a","value_b"]]"#, expected: None, }, TestCase { name: 
"comma in metric name", input: r#"["metric","na,me",["label_a","label_b"],["value_a","value_b"]]"#, expected: None, }, TestCase { name: "comma in value", input: r#"["metric","na,me",["label_a","label_b"],["val,ue_a","value_b"]]"#, expected: None, }, TestCase { name: "comma in numeric value", input: r#"["metric","name",["label_a","label_b"],[400,0,"value_b"]]"#, expected: None, }, ]; for case in tc { assert_eq!( case.expected, parse_metrics(case.input), "test case: {}", case.name, ); } } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/raw_entry.rs000066400000000000000000000370731442423076500263350ustar00rootroot00000000000000use std::mem::size_of; use crate::error::MmapError; use crate::util; use crate::util::CheckedOps; use crate::Result; /// The logic to save a `MetricsEntry`, or parse one from a byte slice. #[derive(PartialEq, Clone, Debug)] pub struct RawEntry<'a> { bytes: &'a [u8], encoded_len: usize, } impl<'a> RawEntry<'a> { /// Save an entry to the mmap, returning the value offset in the newly created entry. pub fn save(bytes: &'a mut [u8], key: &[u8], value: f64) -> Result { let total_len = Self::calc_total_len(key.len())?; if total_len > bytes.len() { return Err(MmapError::Other(format!( "entry length {total_len} larger than slice length {}", bytes.len() ))); } // CAST: `calc_len` runs `check_encoded_len`, we know the key len // is less than i32::MAX. No risk of overflows or failed casts. let key_len: u32 = key.len() as u32; // Write the key length to the mmap. bytes[..size_of::()].copy_from_slice(&key_len.to_ne_bytes()); // Advance slice past the size. let bytes = &mut bytes[size_of::()..]; bytes[..key.len()].copy_from_slice(key); // Advance to end of key. let bytes = &mut bytes[key.len()..]; let pad_len = Self::padding_len(key.len()); bytes[..pad_len].fill(b' '); let bytes = &mut bytes[pad_len..]; bytes[..size_of::()].copy_from_slice(&value.to_ne_bytes()); Self::calc_value_offset(key.len()) } /// Parse a byte slice starting into an `MmapEntry`. pub fn from_slice(bytes: &'a [u8]) -> Result { // CAST: no-op on 32-bit, widening on 64-bit. let encoded_len = util::read_u32(bytes, 0)? as usize; let total_len = Self::calc_total_len(encoded_len)?; // Confirm the value is in bounds of the slice provided. if total_len > bytes.len() { return Err(MmapError::out_of_bounds(total_len, bytes.len())); } // Advance slice past length int and cut at end of entry. let bytes = &bytes[size_of::()..total_len]; Ok(Self { bytes, encoded_len }) } /// Read the `f64` value of an entry from memory. #[inline] pub fn value(&self) -> f64 { // We've stripped off the leading u32, don't include that here. let offset = self.encoded_len + Self::padding_len(self.encoded_len); // UNWRAP: We confirm in the constructor that the value offset // is in-range for the slice. util::read_f64(self.bytes, offset).unwrap() } /// The length of the entry key without padding. #[inline] pub fn encoded_len(&self) -> usize { self.encoded_len } /// Returns a slice with the JSON string in the entry, excluding padding. #[inline] pub fn json(&self) -> &[u8] { &self.bytes[..self.encoded_len] } /// Calculate the total length of an `MmapEntry`, including the string length, /// string, padding, and value. #[inline] pub fn total_len(&self) -> usize { // UNWRAP:: We confirmed in the constructor that this doesn't overflow. Self::calc_total_len(self.encoded_len).unwrap() } /// Calculate the total length of an `MmapEntry`, including the string length, /// string, padding, and value. Validates encoding_len is within expected bounds. 
    #[inline]
    pub fn calc_total_len(encoded_len: usize) -> Result<usize> {
        Self::calc_value_offset(encoded_len)?.add_chk(size_of::<f64>())
    }

    /// Calculate the value offset of an `MmapEntry`, including the string length,
    /// string, padding. Validates encoding_len is within expected bounds.
    #[inline]
    pub fn calc_value_offset(encoded_len: usize) -> Result<usize> {
        Self::check_encoded_len(encoded_len)?;

        Ok(size_of::<u32>() + encoded_len + Self::padding_len(encoded_len))
    }

    /// Calculate the number of padding bytes to add to the value key to reach
    /// 8-byte alignment. Does not validate key length.
    #[inline]
    pub fn padding_len(encoded_len: usize) -> usize {
        8 - (size_of::<u32>() + encoded_len) % 8
    }

    #[inline]
    fn check_encoded_len(encoded_len: usize) -> Result<()> {
        if encoded_len as u64 > i32::MAX as u64 {
            return Err(MmapError::KeyLength);
        }
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use bstr::ByteSlice;

    use super::*;
    use crate::testhelper::TestEntry;

    #[test]
    fn test_from_slice() {
        #[derive(PartialEq, Default, Debug)]
        struct TestCase {
            name: &'static str,
            input: TestEntry,
            expected_enc_len: Option<usize>,
            expected_err: Option<MmapError>,
        }

        let tc = vec![
            TestCase {
                name: "ok",
                input: TestEntry {
                    header: 61,
                    json: r#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#,
                    padding_len: 7,
                    value: 1.0,
                },
                expected_enc_len: Some(61),
                ..Default::default()
            },
            TestCase {
                name: "zero length key",
                input: TestEntry {
                    header: 0,
                    json: "",
                    padding_len: 4,
                    value: 1.0,
                },
                expected_enc_len: Some(0),
                ..Default::default()
            },
            TestCase {
                name: "header value too large",
                input: TestEntry {
                    header: i32::MAX as u32 + 1,
                    json: "foo",
                    padding_len: 1,
                    value: 0.0,
                },
                expected_err: Some(MmapError::KeyLength),
                ..Default::default()
            },
            TestCase {
                name: "header value much longer than json len",
                input: TestEntry {
                    header: 256,
                    json: "foobar",
                    padding_len: 6,
                    value: 1.0,
                },
                expected_err: Some(MmapError::out_of_bounds(272, 24)),
                ..Default::default()
            },
            TestCase {
                // Situations where encoded_len is wrong but padding makes the
                // value offset the same are not caught.
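                // Here, for instance, the header claims 4 bytes but the JSON is
                // "123" (3 bytes): calc_total_len(4) = 24, while the buffer built
                // from the entry is only 16 bytes, so the bounds check fires.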
name: "header off by one", input: TestEntry { header: 4, json: "123", padding_len: 1, value: 1.0, }, expected_err: Some(MmapError::out_of_bounds(24, 16)), ..Default::default() }, ]; for case in tc { let name = case.name; let input = case.input.as_bstring(); let resp = RawEntry::from_slice(&input); if case.expected_err.is_none() { let expected_buf = case.input.as_bytes_no_header(); let resp = resp.as_ref().unwrap(); let bytes = resp.bytes; assert_eq!(expected_buf, bytes.as_bstr(), "test case: {name} - bytes",); assert_eq!( resp.json(), case.input.json.as_bytes(), "test case: {name} - json matches" ); assert_eq!( resp.total_len(), case.input.as_bstring().len(), "test case: {name} - total_len matches" ); assert_eq!( resp.encoded_len(), case.input.json.len(), "test case: {name} - encoded_len matches" ); assert!( resp.json().iter().all(|&c| c != b' '), "test case: {name} - no spaces in json" ); let padding_len = RawEntry::padding_len(case.input.json.len()); assert!( bytes[resp.encoded_len..resp.encoded_len + padding_len] .iter() .all(|&c| c == b' '), "test case: {name} - padding is spaces" ); assert_eq!( resp.value(), case.input.value, "test case: {name} - value is correct" ); } if let Some(expected_enc_len) = case.expected_enc_len { assert_eq!( expected_enc_len, resp.as_ref().unwrap().encoded_len, "test case: {name} - encoded len", ); } if let Some(expected_err) = case.expected_err { assert_eq!(expected_err, resp.unwrap_err(), "test case: {name} - error",); } } } #[test] fn test_save() { struct TestCase { name: &'static str, key: &'static [u8], value: f64, buf_len: usize, expected_entry: Option, expected_resp: Result, } // TODO No test case to validate keys with len > i32::MAX, adding a static that large crashes // the test binary. let tc = vec![ TestCase { name: "ok", key: br#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, value: 256.0, buf_len: 256, expected_entry: Some(TestEntry { header: 61, json: r#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, padding_len: 7, value: 256.0, }), expected_resp: Ok(72), }, TestCase { name: "zero length key", key: b"", value: 1.0, buf_len: 256, expected_entry: Some(TestEntry { header: 0, json: "", padding_len: 4, value: 1.0, }), expected_resp: Ok(8), }, TestCase { name: "infinite value", key: br#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, value: f64::INFINITY, buf_len: 256, expected_entry: Some(TestEntry { header: 61, json: r#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, padding_len: 7, value: f64::INFINITY, }), expected_resp: Ok(72), }, TestCase { name: "buf len matches entry len", key: br#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, value: 1.0, buf_len: 80, expected_entry: Some(TestEntry { header: 61, json: r#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, padding_len: 7, value: 1.0, }), expected_resp: Ok(72), }, TestCase { name: "buf much too short", key: br#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, value: 1.0, buf_len: 5, expected_entry: None, expected_resp: Err(MmapError::Other(format!( "entry length {} larger than slice length {}", 80, 5, ))), }, TestCase { name: "buf short by one", key: br#"["metric","name",["label_a","label_b"],["value_a","value_b"]]"#, value: 1.0, buf_len: 79, expected_entry: None, expected_resp: Err(MmapError::Other(format!( "entry length {} larger than slice length {}", 80, 79, ))), }, ]; for case in tc { let mut buf = vec![0; case.buf_len]; let resp = RawEntry::save(&mut buf, case.key, 
case.value); assert_eq!( case.expected_resp, resp, "test case: {} - response", case.name, ); if let Some(e) = case.expected_entry { let expected_buf = e.as_bstring(); assert_eq!( expected_buf, buf[..expected_buf.len()].as_bstr(), "test case: {} - buffer state", case.name ); let header_len = u32::from_ne_bytes(buf[..size_of::()].try_into().unwrap()); assert_eq!( case.key.len(), header_len as usize, "test case: {} - size header", case.name, ); } } } #[test] fn test_calc_value_offset() { struct TestCase { name: &'static str, encoded_len: usize, expected_value_offset: Option, expected_total_len: Option, expected_err: Option, } let tc = vec![ TestCase { name: "ok", encoded_len: 8, expected_value_offset: Some(16), expected_total_len: Some(24), expected_err: None, }, TestCase { name: "padding length one", encoded_len: 3, expected_value_offset: Some(8), expected_total_len: Some(16), expected_err: None, }, TestCase { name: "padding length eight", encoded_len: 4, expected_value_offset: Some(16), expected_total_len: Some(24), expected_err: None, }, TestCase { name: "encoded len gt i32::MAX", encoded_len: i32::MAX as usize + 1, expected_value_offset: None, expected_total_len: None, expected_err: Some(MmapError::KeyLength), }, ]; for case in tc { let name = case.name; if let Some(expected_value_offset) = case.expected_value_offset { assert_eq!( expected_value_offset, RawEntry::calc_value_offset(case.encoded_len).unwrap(), "test case: {name} - value offset" ); } if let Some(expected_total_len) = case.expected_total_len { assert_eq!( expected_total_len, RawEntry::calc_total_len(case.encoded_len).unwrap(), "test case: {name} - total len" ); } if let Some(expected_err) = case.expected_err { assert_eq!( expected_err, RawEntry::calc_value_offset(case.encoded_len).unwrap_err(), "test case: {name} - err" ); } } } #[test] fn test_padding_len() { for encoded_len in 0..64 { let padding = RawEntry::padding_len(encoded_len); // Validate we're actually aligning to 8 bytes. assert!((size_of::() + encoded_len + padding) % 8 == 0) } } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/testhelper.rs000066400000000000000000000132671442423076500265010ustar00rootroot00000000000000use bstr::{BString, B}; use std::fs::File; use std::io::{Read, Seek, Write}; use std::path::PathBuf; use tempfile::{tempdir, TempDir}; use crate::raw_entry::RawEntry; use crate::HEADER_SIZE; #[derive(PartialEq, Default, Debug)] pub struct TestEntry { pub header: u32, pub json: &'static str, pub padding_len: usize, pub value: f64, } impl TestEntry { pub fn new(json: &'static str, value: f64) -> Self { TestEntry { header: json.len() as u32, json, padding_len: RawEntry::padding_len(json.len()), value, } } pub fn as_bytes(&self) -> Vec { [ B(&self.header.to_ne_bytes()), self.json.as_bytes(), &vec![b' '; self.padding_len], B(&self.value.to_ne_bytes()), ] .concat() } pub fn as_bstring(&self) -> BString { [ B(&self.header.to_ne_bytes()), self.json.as_bytes(), &vec![b' '; self.padding_len], B(&self.value.to_ne_bytes()), ] .concat() .into() } pub fn as_bytes_no_header(&self) -> BString { [ self.json.as_bytes(), &vec![b' '; self.padding_len], B(&self.value.to_ne_bytes()), ] .concat() .into() } } /// Format the data for a `.db` file. /// Optional header value can be used to set an invalid `used` size. 
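// For example, with the inputs used in the tests below: entries for "foobar"
// and "qux" are 24 and 16 bytes respectively, so the `used` header is written
// as 48 = 8 (file header) + 24 + 16.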
pub fn entries_to_db(entries: &[&'static str], values: &[f64], header: Option) -> Vec { let mut out = Vec::new(); let entry_bytes: Vec<_> = entries .iter() .zip(values) .flat_map(|(e, val)| TestEntry::new(e, *val).as_bytes()) .collect(); let used = match header { Some(u) => u, None => (entry_bytes.len() + HEADER_SIZE) as u32, }; out.extend(used.to_ne_bytes()); out.extend(&[0x0u8; 4]); // Padding. out.extend(entry_bytes); out } /// A temporary file, path, and dir for use with testing. #[derive(Debug)] pub struct TestFile { pub file: File, pub path: PathBuf, pub dir: TempDir, } impl TestFile { pub fn new(file_data: &[u8]) -> TestFile { let dir = tempdir().unwrap(); let path = dir.path().join("test.db"); let mut file = File::options() .create(true) .read(true) .write(true) .open(&path) .unwrap(); file.write_all(file_data).unwrap(); file.sync_all().unwrap(); file.rewind().unwrap(); // We need to keep `dir` in scope so it doesn't drop before the files it // contains, which may prevent cleanup. TestFile { file, path, dir } } } mod test { use super::*; #[test] fn test_entry_new() { let json = "foobar"; let value = 1.0f64; let expected = TestEntry { header: 6, json, padding_len: 6, value, }; let actual = TestEntry::new(json, value); assert_eq!(expected, actual); } #[test] fn test_entry_bytes() { let json = "foobar"; let value = 1.0f64; let expected = [ &6u32.to_ne_bytes(), B(json), &[b' '; 6], &value.to_ne_bytes(), ] .concat(); let actual = TestEntry::new(json, value).as_bstring(); assert_eq!(expected, actual); } #[test] fn test_entry_bytes_no_header() { let json = "foobar"; let value = 1.0f64; let expected = [B(json), &[b' '; 6], &value.to_ne_bytes()].concat(); let actual = TestEntry::new(json, value).as_bytes_no_header(); assert_eq!(expected, actual); } #[test] fn test_entries_to_db_header_correct() { let json = &["foobar", "qux"]; let values = &[1.0, 2.0]; let out = entries_to_db(json, values, None); assert_eq!(48u32.to_ne_bytes(), out[0..4], "used set correctly"); assert_eq!([0u8; 4], out[4..8], "padding set"); assert_eq!( TestEntry::new(json[0], values[0]).as_bytes(), out[8..32], "first entry matches" ); assert_eq!( TestEntry::new(json[1], values[1]).as_bytes(), out[32..48], "second entry matches" ); } #[test] fn test_entries_to_db_header_wrong() { let json = &["foobar", "qux"]; let values = &[1.0, 2.0]; const WRONG_USED: u32 = 1000; let out = entries_to_db(json, values, Some(WRONG_USED)); assert_eq!( WRONG_USED.to_ne_bytes(), out[0..4], "used set to value requested" ); assert_eq!([0u8; 4], out[4..8], "padding set"); assert_eq!( TestEntry::new(json[0], values[0]).as_bytes(), out[8..32], "first entry matches" ); assert_eq!( TestEntry::new(json[1], values[1]).as_bytes(), out[32..48], "second entry matches" ); } #[test] fn test_file() { let mut test_file = TestFile::new(b"foobar"); let stat = test_file.file.metadata().unwrap(); assert_eq!(6, stat.len(), "file length"); assert_eq!( 0, test_file.file.stream_position().unwrap(), "at start of file" ); let mut out_buf = vec![0u8; 256]; let read_result = test_file.file.read(&mut out_buf); assert!(read_result.is_ok()); assert_eq!(6, read_result.unwrap(), "file is readable"); let write_result = test_file.file.write(b"qux"); assert!(write_result.is_ok()); assert_eq!(3, write_result.unwrap(), "file is writable"); } } prometheus-client-mmap-v0.23.1/ext/fast_mmaped_file_rs/src/util.rs000066400000000000000000000067771442423076500253070ustar00rootroot00000000000000use nix::errno::Errno; use nix::libc::c_long; use std::fmt::Display; use std::io; use 
std::mem::size_of;

use crate::error::MmapError;
use crate::Result;

/// Wrapper around `checked_add()` that converts failures
/// to `MmapError::Overflow`.
pub trait CheckedOps: Sized {
    fn add_chk(self, rhs: Self) -> Result<Self>;
    fn mul_chk(self, rhs: Self) -> Result<Self>;
}

impl CheckedOps for usize {
    fn add_chk(self, rhs: Self) -> Result<Self> {
        self.checked_add(rhs)
            .ok_or_else(|| MmapError::overflowed(self, rhs, "adding"))
    }

    fn mul_chk(self, rhs: Self) -> Result<Self> {
        self.checked_mul(rhs)
            .ok_or_else(|| MmapError::overflowed(self, rhs, "multiplying"))
    }
}

impl CheckedOps for c_long {
    fn add_chk(self, rhs: Self) -> Result<Self> {
        self.checked_add(rhs)
            .ok_or_else(|| MmapError::overflowed(self, rhs, "adding"))
    }

    fn mul_chk(self, rhs: Self) -> Result<Self> {
        self.checked_mul(rhs)
            .ok_or_else(|| MmapError::overflowed(self, rhs, "multiplying"))
    }
}

/// A wrapper around `TryFrom`, returning `MmapError::FailedCast` on error.
pub fn cast_chk<T, U>(val: T, name: &str) -> Result<U>
where
    T: Copy + Display,
    U: std::convert::TryFrom<T>,
{
    U::try_from(val).map_err(|_| MmapError::failed_cast::<T, U>(val, name))
}

/// Retrieve errno(3).
pub fn errno() -> i32 {
    // UNWRAP: This will always return `Some` when called from `last_os_error()`.
    io::Error::last_os_error().raw_os_error().unwrap()
}

/// Get the error string associated with errno(3).
/// Equivalent to strerror(3).
pub fn strerror(errno: i32) -> &'static str {
    Errno::from_i32(errno).desc()
}

/// Read a `u32` value from a byte slice starting from `offset`.
#[inline]
pub fn read_u32(buf: &[u8], offset: usize) -> Result<u32> {
    if let Some(slice) = buf.get(offset..offset + size_of::<u32>()) {
        // UNWRAP: We can safely unwrap the conversion from slice to array as we
        // know the source and targets are constructed here with the same length.
        let out: &[u8; size_of::<u32>()] = slice.try_into().unwrap();

        return Ok(u32::from_ne_bytes(*out));
    }
    Err(MmapError::out_of_bounds(offset, buf.len()))
}

/// Read an `f64` value from a byte slice starting from `offset`.
#[inline]
pub fn read_f64(buf: &[u8], offset: usize) -> Result<f64> {
    if let Some(slice) = buf.get(offset..offset + size_of::<f64>()) {
        // UNWRAP: We can safely unwrap the conversion from slice to array as we
        // can be sure the target array has same length as the source slice.
        let out: &[u8; size_of::<f64>()] = slice.try_into().unwrap();

        return Ok(f64::from_ne_bytes(*out));
    }
    Err(MmapError::out_of_bounds(
        offset + size_of::<f64>(),
        buf.len(),
    ))
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_read_u32() {
        let buf = 1u32.to_ne_bytes();

        assert!(matches!(read_u32(&buf, 0), Ok(1)), "index ok");
        assert!(read_u32(&buf, 10).is_err(), "index out of range");
        assert!(
            read_u32(&buf, 1).is_err(),
            "index in range but end out of range"
        );
    }

    #[test]
    fn test_read_f64() {
        let buf = 1.00f64.to_ne_bytes();

        let ok = read_f64(&buf, 0);
        assert!(ok.is_ok());
        assert_eq!(ok.unwrap(), 1.00);

        assert!(read_f64(&buf, 10).is_err(), "index out of range");
        assert!(
            read_f64(&buf, 1).is_err(),
            "index in range but end out of range"
        );
    }
}
prometheus-client-mmap-v0.23.1/fuzz/000077500000000000000000000000001442423076500173675ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/fuzz/prometheus/000077500000000000000000000000001442423076500215625ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/fuzz/prometheus/client/000077500000000000000000000000001442423076500230405ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/fuzz/prometheus/client/helpers/000077500000000000000000000000001442423076500245025ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/fuzz/prometheus/client/helpers/fuzz_mmaped_file.rb000066400000000000000000000040661442423076500303550ustar00rootroot00000000000000require 'fuzzbert'
require 'tempfile'
require 'prometheus/client'
require 'prometheus/client/helper/mmaped_file'
require 'fast_mmaped_file'

module MmapedFileHelper
  def self.assert_equals(a, b)
    raise "#{a} not equal #{b}" unless a == b || a.is_a?(Float) && a.nan? && b.is_a?(Float) && b.nan?
  end

  def self.process(filepath)
    f = Prometheus::Client::Helper::MmapedFile.open(filepath)
    metrics = {}
    f.to_metrics(metrics, true)

    positions = {}
    f.entries(true).each do |data, encoded_len, value_offset, pos|
      encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset))
      positions[encoded] = pos
      assert_equals(f.fetch_entry(positions, encoded, value - 1), value)
    end

    f.upsert_entry(positions, 'key', 0.1)
    assert_equals(f.fetch_entry(positions, 'key', 0), 0.1)

    f.upsert_entry(positions, 'key2', 0.2)
    assert_equals(f.fetch_entry(positions, 'key2', 0), 0.2)
  rescue Prometheus::Client::Helper::EntryParser::ParsingError
  rescue PrometheusParsingError
  ensure
    # fuzzbert wraps the process in a trap context. Ruby doesn't allow
    # mutexes to be synchronized in that context, so we need to spawn another
    # thread to close the file.
Thread.new { f.close }.join(0.5) end end class PrintAndExitHandler def handle(error_data) puts error_data[:id] p error_data[:data] puts error_data[:pid] puts error_data[:status] Process.exit(1) end end fuzz 'MmapedFile' do deploy do |data| tmpfile = Tempfile.new('mmmaped_file') tmpfile.write(data) tmpfile.close MmapedFileHelper.process(tmpfile.path) tmpfile.unlink end data 'completely random' do FuzzBert::Generators.random end data 'should have 10000 bytes used and first entry' do c = FuzzBert::Container.new c << FuzzBert::Generators.fixed([10000, 0, 11, '[1,1,[],[]] ', 1].pack('LLLA12d')) c << FuzzBert::Generators.random(2) c << FuzzBert::Generators.fixed([0, 0].pack('CC')) c << FuzzBert::Generators.fixed('[1,1,[],[]]') c << FuzzBert::Generators.random c.generator end end prometheus-client-mmap-v0.23.1/known-leaks-suppression.txt000066400000000000000000000002051442423076500237500ustar00rootroot00000000000000leak:onig_new leak:onig_compile leak:onig_bbuf_init leak:onig_region_resize leak:ruby_xcalloc leak:/lib/x86_64-linux-gnu/libruby-2.3 prometheus-client-mmap-v0.23.1/lib/000077500000000000000000000000001442423076500171375ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/lib/prometheus.rb000066400000000000000000000001371442423076500216600ustar00rootroot00000000000000# Prometheus is a generic time-series collection and computation server. module Prometheus end prometheus-client-mmap-v0.23.1/lib/prometheus/000077500000000000000000000000001442423076500213325ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/lib/prometheus/client.rb000066400000000000000000000033251442423076500231400ustar00rootroot00000000000000require 'prometheus/client/registry' require 'prometheus/client/configuration' require 'prometheus/client/mmaped_value' module Prometheus # Client is a ruby implementation for a Prometheus compatible client. module Client class << self attr_writer :configuration def configuration @configuration ||= Configuration.new end def configure yield(configuration) end # Returns a default registry object def registry @registry ||= Registry.new end def logger configuration.logger end def pid configuration.pid_provider.call end # Resets the registry and reinitializes all metrics files. # Use case: clean up everything in specs `before` block, # to prevent leaking the state between specs which are updating metrics. def reset! @registry = nil ::Prometheus::Client::MmapedValue.reset_and_reinitialize end def cleanup! Dir.glob("#{configuration.multiprocess_files_dir}/*.db").each { |f| File.unlink(f) if File.exist?(f) } end # With `force: false`: reinitializes metric files only for processes with the changed PID. # With `force: true`: reinitializes all metrics files. # Always keeps the registry. # Use case (`force: false`): pick up new metric files on each worker start, # without resetting already registered files for the master or previously initialized workers. 
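      # Illustrative usage (not part of the public docs): in a forking server's
      # after-fork hook, a worker would typically call
      #   Prometheus::Client.reinitialize_on_pid_change
      # so its metrics land in fresh per-PID files.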
def reinitialize_on_pid_change(force: false) if force ::Prometheus::Client::MmapedValue.reset_and_reinitialize else ::Prometheus::Client::MmapedValue.reinitialize_on_pid_change end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/000077500000000000000000000000001442423076500226105ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/lib/prometheus/client/configuration.rb000066400000000000000000000013401442423076500260020ustar00rootroot00000000000000require 'prometheus/client/registry' require 'prometheus/client/mmaped_value' require 'prometheus/client/page_size' require 'logger' require 'tmpdir' module Prometheus module Client class Configuration attr_accessor :value_class, :multiprocess_files_dir, :initial_mmap_file_size, :logger, :pid_provider def initialize @value_class = ::Prometheus::Client::MmapedValue @initial_mmap_file_size = ::Prometheus::Client::PageSize.page_size(fallback_page_size: 4096) @logger = Logger.new($stdout) @pid_provider = Process.method(:pid) @multiprocess_files_dir = ENV.fetch('prometheus_multiproc_dir') do Dir.mktmpdir("prometheus-mmap") end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/counter.rb000066400000000000000000000011061442423076500246120ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client/metric' module Prometheus module Client # Counter is a metric that exposes merely a sum or tally of things. class Counter < Metric def type :counter end def increment(labels = {}, by = 1) raise ArgumentError, 'increment must be a non-negative number' if by < 0 label_set = label_set_for(labels) synchronize { @values[label_set].increment(by) } end private def default(labels) value_object(type, @name, @name, labels) end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/formats/000077500000000000000000000000001442423076500242635ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/lib/prometheus/client/formats/text.rb000066400000000000000000000074531442423076500256050ustar00rootroot00000000000000require 'prometheus/client/uses_value_type' require 'prometheus/client/helper/json_parser' require 'prometheus/client/helper/plain_file' require 'prometheus/client/helper/metrics_processing' require 'prometheus/client/helper/metrics_representation' module Prometheus module Client module Formats # Text format is human readable mainly used for manual inspection. module Text MEDIA_TYPE = 'text/plain'.freeze VERSION = '0.0.4'.freeze CONTENT_TYPE = "#{MEDIA_TYPE}; version=#{VERSION}".freeze class << self def marshal(registry) metrics = registry.metrics.map do |metric| samples = metric.values.flat_map do |label_set, value| representation(metric, label_set, value) end [metric.name, { type: metric.type, help: metric.docstring, samples: samples }] end Helper::MetricsRepresentation.to_text(metrics) end def marshal_multiprocess(path = Prometheus::Client.configuration.multiprocess_files_dir, use_rust: false) file_list = Dir.glob(File.join(path, '*.db')).sort .map {|f| Helper::PlainFile.new(f) } .map {|f| [f.filepath, f.multiprocess_mode.to_sym, f.type.to_sym, f.pid] } if use_rust && rust_impl_available? FastMmapedFileRs.to_metrics(file_list.to_a) else FastMmapedFile.to_metrics(file_list.to_a) end end def rust_impl_available? return @rust_available unless @rust_available.nil? 
check_for_rust end private def load_rust_extension begin ruby_version = /(\d+\.\d+)/.match(RUBY_VERSION) require_relative "../../../#{ruby_version}/fast_mmaped_file_rs" rescue LoadError require 'fast_mmaped_file_rs' end end def check_for_rust # This will be evaluated on each invocation even with `||=` if # `@rust_available` if false. Running a `require` statement is slow, # so the `rust_impl_available?` method memoizes the result, external # callers can only trigger this method a single time. @rust_available = begin load_rust_extension true rescue LoadError Prometheus::Client.logger.info('FastMmapedFileRs unavailable') false end end def load_metrics(path) metrics = {} Dir.glob(File.join(path, '*.db')).sort.each do |f| Helper::PlainFile.new(f).to_metrics(metrics) end metrics end def representation(metric, label_set, value) labels = metric.base_labels.merge(label_set) if metric.type == :summary summary(metric.name, labels, value) elsif metric.type == :histogram histogram(metric.name, labels, value) else [[metric.name, labels, value.get]] end end def summary(name, set, value) rv = value.get.map do |q, v| [name, set.merge(quantile: q), v] end rv << ["#{name}_sum", set, value.get.sum] rv << ["#{name}_count", set, value.get.total] rv end def histogram(name, set, value) # |metric_name, labels, value| rv = value.get.map do |q, v| [name, set.merge(le: q), v] end rv << [name, set.merge(le: '+Inf'), value.get.total] rv << ["#{name}_sum", set, value.get.sum] rv << ["#{name}_count", set, value.get.total] rv end end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/gauge.rb000066400000000000000000000021171442423076500242260ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client/metric' module Prometheus module Client # A Gauge is a metric that exposes merely an instantaneous value or some # snapshot thereof. class Gauge < Metric def initialize(name, docstring, base_labels = {}, multiprocess_mode=:all) super(name, docstring, base_labels) if value_class.multiprocess and ![:min, :max, :livesum, :liveall, :all].include?(multiprocess_mode) raise ArgumentError, 'Invalid multiprocess mode: ' + multiprocess_mode end @multiprocess_mode = multiprocess_mode end def type :gauge end def default(labels) value_object(type, @name, @name, labels, @multiprocess_mode) end # Sets the value for the given label set def set(labels, value) @values[label_set_for(labels)].set(value) end def increment(labels, value) @values[label_set_for(labels)].increment(value) end def decrement(labels, value) @values[label_set_for(labels)].decrement(value) end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/000077500000000000000000000000001442423076500240675ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/entry_parser.rb000066400000000000000000000101361442423076500271320ustar00rootroot00000000000000require 'prometheus/client/helper/json_parser' module Prometheus module Client module Helper module EntryParser class ParsingError < RuntimeError; end MINIMUM_SIZE = 8 START_POSITION = 8 VALUE_BYTES = 8 ENCODED_LENGTH_BYTES = 4 def used slice(0..3).unpack('l')[0] end def parts @parts ||= File.basename(filepath, '.db') .split('_') .map { |e| e.gsub(/-\d+$/, '') } # remove trailing -number end def type parts[0].to_sym end def pid (parts[2..-1] || []).join('_') end def multiprocess_mode parts[1] end def empty? size < MINIMUM_SIZE || used.zero? end def entries(ignore_errors = false) return Enumerator.new {} if empty? 
Enumerator.new do |yielder|
            used_ = used # cache used to avoid unnecessary unpack operations

            pos = START_POSITION # used + padding offset
            while pos < used_ && pos < size && pos > 0
              data = slice(pos..-1)
              unless data
                raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors

                pos += 8
                next
              end

              encoded_len, first_encoded_bytes = data.unpack('LL')
              if encoded_len.nil? || encoded_len.zero? || first_encoded_bytes.nil? || first_encoded_bytes.zero?
                # do not parse empty data
                pos += 8
                next
              end

              entry_len = ENCODED_LENGTH_BYTES + encoded_len
              padding_len = 8 - entry_len % 8

              value_offset = entry_len + padding_len # align to 8 bytes
              pos += value_offset

              if value_offset > 0 && (pos + VALUE_BYTES) <= size # if positions are safe
                yielder.yield data, encoded_len, value_offset, pos
              else
                raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors
              end
              pos += VALUE_BYTES
            end
          end
        end

        def parsed_entries(ignore_errors = false)
          result = entries(ignore_errors).map do |data, encoded_len, value_offset, _|
            begin
              encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset))
              [encoded, value]
            rescue ArgumentError => e
              Prometheus::Client.logger.debug("Error processing data: #{bin_to_hex(data[0, 7])} len: #{encoded_len} value_offset: #{value_offset}")
              raise ParsingError, e unless ignore_errors
            end
          end

          result.reject!(&:nil?) if ignore_errors
          result
        end

        def to_metrics(metrics = {}, ignore_errors = false)
          parsed_entries(ignore_errors).each do |key, value|
            begin
              metric_name, name, labelnames, labelvalues = JsonParser.load(key)
              labelnames ||= []
              labelvalues ||= []

              metric = metrics.fetch(metric_name,
                                     metric_name: metric_name,
                                     help: 'Multiprocess metric',
                                     type: type,
                                     samples: [])
              if type == :gauge
                metric[:multiprocess_mode] = multiprocess_mode
                metric[:samples] += [[name, labelnames.zip(labelvalues) + [['pid', pid]], value]]
              else
                # The duplicates and labels are fixed in the next loop.
                metric[:samples] += [[name, labelnames.zip(labelvalues), value]]
              end
              metrics[metric_name] = metric
            rescue JSON::ParserError => e
              raise ParsingError, e unless ignore_errors
            end
          end

          metrics.reject! { |e| e.nil?
} if ignore_errors metrics end private def bin_to_hex(s) s.each_byte.map { |b| b.to_s(16) }.join end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/file_locker.rb000066400000000000000000000023611442423076500266740ustar00rootroot00000000000000module Prometheus module Client module Helper class FileLocker class << self LOCK_FILE_MUTEX = Mutex.new def lock_to_process(filepath) LOCK_FILE_MUTEX.synchronize do @file_locks ||= {} return false if @file_locks[filepath] file = File.open(filepath, 'ab') if file.flock(File::LOCK_NB | File::LOCK_EX) @file_locks[filepath] = file return true else return false end end end def unlock(filepath) LOCK_FILE_MUTEX.synchronize do @file_locks ||= {} return false unless @file_locks[filepath] file = @file_locks[filepath] file.flock(File::LOCK_UN) file.close @file_locks.delete(filepath) end end def unlock_all LOCK_FILE_MUTEX.synchronize do @file_locks ||= {} @file_locks.values.each do |file| file.flock(File::LOCK_UN) file.close end @file_locks = {} end end end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/json_parser.rb000066400000000000000000000007011442423076500267370ustar00rootroot00000000000000require 'json' module Prometheus module Client module Helper module JsonParser class << self if defined?(Oj) def load(s) Oj.load(s) rescue Oj::ParseError, EncodingError => e raise JSON::ParserError.new(e.message) end else def load(s) JSON.parse(s) end end end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/metrics_processing.rb000066400000000000000000000030251442423076500303160ustar00rootroot00000000000000module Prometheus module Client module Helper module MetricsProcessing def self.merge_metrics(metrics) metrics.each_value do |metric| metric[:samples] = merge_samples(metric[:samples], metric[:type], metric[:multiprocess_mode]).map do |(name, labels), value| [name, labels.to_h, value] end end end def self.merge_samples(raw_samples, metric_type, multiprocess_mode) samples = {} raw_samples.each do |name, labels, value| without_pid = labels.reject { |l| l[0] == 'pid' } case metric_type when :gauge case multiprocess_mode when 'min' s = samples.fetch([name, without_pid], value) samples[[name, without_pid]] = [s, value].min when 'max' s = samples.fetch([name, without_pid], value) samples[[name, without_pid]] = [s, value].max when 'livesum' s = samples.fetch([name, without_pid], 0.0) samples[[name, without_pid]] = s + value else # all/liveall samples[[name, labels]] = value end else # Counter, Histogram and Summary. 
s = samples.fetch([name, without_pid], 0.0) samples[[name, without_pid]] = s + value end end samples end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/metrics_representation.rb000066400000000000000000000026521442423076500312110ustar00rootroot00000000000000module Prometheus module Client module Helper module MetricsRepresentation METRIC_LINE = '%s%s %s'.freeze TYPE_LINE = '# TYPE %s %s'.freeze HELP_LINE = '# HELP %s %s'.freeze LABEL = '%s="%s"'.freeze SEPARATOR = ','.freeze DELIMITER = "\n".freeze REGEX = { doc: /[\n\\]/, label: /[\n\\"]/ }.freeze REPLACE = { "\n" => '\n', '\\' => '\\\\', '"' => '\"' }.freeze def self.to_text(metrics) lines = [] metrics.each do |name, metric| lines << format(HELP_LINE, name, escape(metric[:help])) lines << format(TYPE_LINE, name, metric[:type]) metric[:samples].each do |metric_name, labels, value| lines << metric(metric_name, format_labels(labels), value) end end # there must be a trailing delimiter (lines << nil).join(DELIMITER) end def self.metric(name, labels, value) format(METRIC_LINE, name, labels, value) end def self.format_labels(set) return if set.empty? strings = set.each_with_object([]) do |(key, value), memo| memo << format(LABEL, key, escape(value, :label)) end "{#{strings.join(SEPARATOR)}}" end def self.escape(string, format = :doc) string.to_s.gsub(REGEX[format], REPLACE) end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/mmaped_file.rb000066400000000000000000000032251442423076500266600ustar00rootroot00000000000000require 'prometheus/client/helper/entry_parser' require 'prometheus/client/helper/file_locker' # load precompiled extension if available begin ruby_version = /(\d+\.\d+)/.match(RUBY_VERSION) require_relative "../../../#{ruby_version}/fast_mmaped_file" rescue LoadError require 'fast_mmaped_file' end module Prometheus module Client module Helper class MmapedFile < FastMmapedFile include EntryParser attr_reader :filepath, :size def initialize(filepath) @filepath = filepath File.open(filepath, 'a+b') do |file| file.truncate(initial_mmap_file_size) if file.size < MINIMUM_SIZE @size = file.size end super(filepath) end def close munmap FileLocker.unlock(filepath) end private def initial_mmap_file_size Prometheus::Client.configuration.initial_mmap_file_size end public class << self def open(filepath) MmapedFile.new(filepath) end def ensure_exclusive_file(file_prefix = 'mmaped_file') (0..Float::INFINITY).lazy .map { |f_num| "#{file_prefix}_#{Prometheus::Client.pid}-#{f_num}.db" } .map { |filename| File.join(Prometheus::Client.configuration.multiprocess_files_dir, filename) } .find { |path| Helper::FileLocker.lock_to_process(path) } end def open_exclusive_file(file_prefix = 'mmaped_file') filename = Helper::MmapedFile.ensure_exclusive_file(file_prefix) open(filename) end end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/helper/plain_file.rb000066400000000000000000000010271442423076500265160ustar00rootroot00000000000000require 'prometheus/client/helper/entry_parser' module Prometheus module Client module Helper # Parses DB files without using mmap class PlainFile include EntryParser attr_reader :filepath def source @data ||= File.read(filepath, mode: 'rb') end def initialize(filepath) @filepath = filepath end def slice(*args) source.slice(*args) end def size source.length end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/histogram.rb000066400000000000000000000046601442423076500251400ustar00rootroot00000000000000require 
'prometheus/client/metric' require 'prometheus/client/uses_value_type' module Prometheus module Client # A histogram samples observations (usually things like request durations # or response sizes) and counts them in configurable buckets. It also # provides a sum of all observed values. class Histogram < Metric # Value represents the state of a Histogram at a given point. class Value < Hash include UsesValueType attr_accessor :sum, :total, :total_inf def initialize(type, name, labels, buckets) @sum = value_object(type, name, "#{name}_sum", labels) @total = value_object(type, name, "#{name}_count", labels) @total_inf = value_object(type, name, "#{name}_bucket", labels.merge(le: "+Inf")) buckets.each do |bucket| self[bucket] = value_object(type, name, "#{name}_bucket", labels.merge(le: bucket.to_s)) end end def observe(value) @sum.increment(value) @total.increment() @total_inf.increment() each_key do |bucket| self[bucket].increment() if value <= bucket end end def get() hash = {} each_key do |bucket| hash[bucket] = self[bucket].get() end hash end end # DEFAULT_BUCKETS are the default Histogram buckets. The default buckets # are tailored to broadly measure the response time (in seconds) of a # network service. (From DefBuckets client_golang) DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10].freeze # Offer a way to manually specify buckets def initialize(name, docstring, base_labels = {}, buckets = DEFAULT_BUCKETS) raise ArgumentError, 'Unsorted buckets, typo?' unless sorted? buckets @buckets = buckets super(name, docstring, base_labels) end def type :histogram end def observe(labels, value) label_set = label_set_for(labels) synchronize { @values[label_set].observe(value) } end private def default(labels) # TODO: default function needs to know key of hash info (label names and values) Value.new(type, @name, labels, @buckets) end def sorted?(bucket) bucket.each_cons(2).all? { |i, j| i <= j } end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/label_set_validator.rb000066400000000000000000000046241442423076500271420ustar00rootroot00000000000000# encoding: UTF-8 module Prometheus module Client # LabelSetValidator ensures that all used label sets comply with the # Prometheus specification. class LabelSetValidator # TODO: we might allow setting :instance in the future RESERVED_LABELS = [:job, :instance].freeze class LabelSetError < StandardError; end class InvalidLabelSetError < LabelSetError; end class InvalidLabelError < LabelSetError; end class ReservedLabelError < LabelSetError; end def initialize(reserved_labels = []) @reserved_labels = (reserved_labels + RESERVED_LABELS).freeze @validated = {} end def valid?(labels) unless labels.is_a?(Hash) raise InvalidLabelSetError, "#{labels} is not a valid label set" end labels.all? do |key, value| validate_symbol(key) validate_name(key) validate_reserved_key(key) validate_value(key, value) end end def validate(labels) return labels if @validated.key?(labels.hash) valid?(labels) unless @validated.empty? 
|| match?(labels, @validated.first.last) raise InvalidLabelSetError, "labels must have the same signature: (#{label_diff(labels, @validated.first.last)})" end @validated[labels.hash] = labels end private def label_diff(a, b) "expected keys: #{b.keys.sort}, got: #{a.keys.sort}" end def match?(a, b) a.keys.sort == b.keys.sort end def validate_symbol(key) return true if key.is_a?(Symbol) raise InvalidLabelError, "label #{key} is not a symbol" end def validate_name(key) return true unless key.to_s.start_with?('__') raise ReservedLabelError, "label #{key} must not start with __" end def validate_reserved_key(key) return true unless @reserved_labels.include?(key) raise ReservedLabelError, "#{key} is reserved" end def validate_value(key, value) return true if value.is_a?(String) || value.is_a?(Numeric) || value.is_a?(Symbol) || value.is_a?(FalseClass) || value.is_a?(TrueClass) || value.nil? raise InvalidLabelError, "#{key} does not contain a valid value (type #{value.class})" end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/metric.rb000066400000000000000000000037651442423076500244330ustar00rootroot00000000000000require 'thread' require 'prometheus/client/label_set_validator' require 'prometheus/client/uses_value_type' module Prometheus module Client class Metric include UsesValueType attr_reader :name, :docstring, :base_labels def initialize(name, docstring, base_labels = {}) @mutex = Mutex.new @validator = case type when :summary LabelSetValidator.new(['quantile']) when :histogram LabelSetValidator.new(['le']) else LabelSetValidator.new end @values = Hash.new { |hash, key| hash[key] = default(key) } validate_name(name) validate_docstring(docstring) @validator.valid?(base_labels) @name = name @docstring = docstring @base_labels = base_labels end # Returns the value for the given label set def get(labels = {}) label_set = label_set_for(labels) @validator.valid?(label_set) @values[label_set].get end # Returns all label sets with their values def values synchronize do @values.each_with_object({}) do |(labels, value), memo| memo[labels] = value end end end private def touch_default_value @values[label_set_for({})] end def default(labels) value_object(type, @name, @name, labels) end def validate_name(name) return true if name.is_a?(Symbol) raise ArgumentError, 'given name must be a symbol' end def validate_docstring(docstring) return true if docstring.respond_to?(:empty?) && !docstring.empty? raise ArgumentError, 'docstring must be given' end def label_set_for(labels) @validator.validate(@base_labels.merge(labels)) end def synchronize(&block) @mutex.synchronize(&block) end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/mmaped_dict.rb000066400000000000000000000041051442423076500254030ustar00rootroot00000000000000require 'prometheus/client/helper/mmaped_file' require 'prometheus/client/helper/plain_file' require 'prometheus/client' module Prometheus module Client class ParsingError < StandardError end # A dict of doubles, backed by an mmapped file. # # The file starts with a 4 byte int, indicating how much of it is used. # Then 4 bytes of padding. # There's then a number of entries, consisting of a 4 byte int which is the # size of the next field, a utf-8 encoded string key, padding to an 8 byte # alignment, and then a 8 byte float which is the value. 
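#
# A worked example of that layout (an illustrative sketch, not code used by
# the library): for the key 'foo' an entry takes 4 bytes for the length
# field, 3 bytes for the key, 1 byte of padding up to the next 8-byte
# boundary, and 8 bytes for the double, 16 bytes in total.
#
#   key = 'foo'
#   used = 4 + key.bytesize       # => 7 (length field plus key)
#   used += (8 - used % 8) % 8    # => 8, aligned so the double fits
#   entry_size = used + 8         # => 16 bytes for this entry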
class MmapedDict attr_reader :m, :used, :positions def self.read_all_values(f) Helper::PlainFile.new(f).entries.map do |data, encoded_len, value_offset, _| encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset)) [encoded, value] end end def initialize(m) @mutex = Mutex.new @m = m # @m.mlock # TODO: Ensure memory is locked to RAM @positions = {} read_all_positions.each do |key, pos| @positions[key] = pos end rescue StandardError => e raise ParsingError, "exception #{e} while processing metrics file #{path}" end def read_value(key) @m.fetch_entry(@positions, key, 0.0) end def write_value(key, value) @m.upsert_entry(@positions, key, value) end def path @m.filepath if @m end def close @m.sync @m.close rescue TypeError => e Prometheus::Client.logger.warn("munmap raised error #{e}") end def inspect "#<#{self.class}:0x#{(object_id << 1).to_s(16)}>" end private def init_value(key) @m.add_entry(@positions, key, 0.0) end # Yield (key, pos). No locking is performed. def read_all_positions @m.entries.map do |data, encoded_len, _, absolute_pos| encoded, = data.unpack(format('@4A%d', encoded_len)) [encoded, absolute_pos] end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/mmaped_value.rb000066400000000000000000000067161442423076500256060ustar00rootroot00000000000000require 'prometheus/client' require 'prometheus/client/mmaped_dict' require 'json' module Prometheus module Client # A float protected by a mutex backed by a per-process mmaped file. class MmapedValue VALUE_LOCK = Mutex.new @@files = {} @@pid = -1 def initialize(type, metric_name, name, labels, multiprocess_mode = '') @file_prefix = type.to_s @metric_name = metric_name @name = name @labels = labels if type == :gauge @file_prefix += '_' + multiprocess_mode.to_s end @pid = -1 @mutex = Mutex.new initialize_file end def increment(amount = 1) @mutex.synchronize do initialize_file if pid_changed? @value += amount write_value(@key, @value) @value end end def decrement(amount = 1) increment(-amount) end def set(value) @mutex.synchronize do initialize_file if pid_changed? @value = value write_value(@key, @value) @value end end def get @mutex.synchronize do initialize_file if pid_changed? return @value end end def pid_changed? @pid != Process.pid end # method needs to be run in VALUE_LOCK mutex def unsafe_reinitialize_file(check_pid = true) unsafe_initialize_file if !check_pid || pid_changed? end def self.reset_and_reinitialize VALUE_LOCK.synchronize do @@pid = Process.pid @@files = {} ObjectSpace.each_object(MmapedValue).each do |v| v.unsafe_reinitialize_file(false) end end end def self.reset_on_pid_change if pid_changed? @@pid = Process.pid @@files = {} end end def self.reinitialize_on_pid_change VALUE_LOCK.synchronize do reset_on_pid_change ObjectSpace.each_object(MmapedValue, &:unsafe_reinitialize_file) end end def self.pid_changed? @@pid != Process.pid end def self.multiprocess true end private def initialize_file VALUE_LOCK.synchronize do unsafe_initialize_file end end def unsafe_initialize_file self.class.reset_on_pid_change @pid = Process.pid unless @@files.has_key?(@file_prefix) unless @file.nil? 
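# close the stale handle first (e.g. one inherited across a fork); a fresh per-process file is opened below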
@file.close end mmaped_file = Helper::MmapedFile.open_exclusive_file(@file_prefix) @@files[@file_prefix] = MmapedDict.new(mmaped_file) end @file = @@files[@file_prefix] @key = rebuild_key @value = read_value(@key) end def rebuild_key keys = @labels.keys.sort values = @labels.values_at(*keys) [@metric_name, @name, keys, values].to_json end def write_value(key, val) @file.write_value(key, val) rescue StandardError => e Prometheus::Client.logger.warn("writing value to #{@file.path} failed with #{e}") Prometheus::Client.logger.debug(e.backtrace.join("\n")) end def read_value(key) @file.read_value(key) rescue StandardError => e Prometheus::Client.logger.warn("reading value from #{@file.path} failed with #{e}") Prometheus::Client.logger.debug(e.backtrace.join("\n")) 0 end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/page_size.rb000066400000000000000000000006051442423076500251040ustar00rootroot00000000000000require 'open3' module Prometheus module Client module PageSize def self.page_size(fallback_page_size: 4096) stdout, status = Open3.capture2('getconf PAGESIZE') return fallback_page_size if status.nil? || !status.success? page_size = stdout.chomp.to_i return fallback_page_size if page_size <= 0 page_size end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/push.rb000066400000000000000000000147331442423076500241240ustar00rootroot00000000000000# encoding: UTF-8 require 'base64' require 'thread' require 'net/http' require 'uri' require 'erb' require 'set' require 'prometheus/client' require 'prometheus/client/formats/text' require 'prometheus/client/label_set_validator' module Prometheus # Client is a ruby implementation for a Prometheus compatible client. module Client # Push implements a simple way to transmit a given registry to a given # Pushgateway. class Push class HttpError < StandardError; end class HttpRedirectError < HttpError; end class HttpClientError < HttpError; end class HttpServerError < HttpError; end DEFAULT_GATEWAY = 'http://localhost:9091'.freeze PATH = '/metrics/job/%s'.freeze SUPPORTED_SCHEMES = %w(http https).freeze attr_reader :job, :gateway, :path def initialize(job:, gateway: DEFAULT_GATEWAY, grouping_key: {}, **kwargs) raise ArgumentError, "job cannot be nil" if job.nil? raise ArgumentError, "job cannot be empty" if job.empty? @validator = LabelSetValidator.new() @validator.validate(grouping_key) @mutex = Mutex.new @job = job @gateway = gateway || DEFAULT_GATEWAY @grouping_key = grouping_key @path = build_path(job, grouping_key) @uri = parse("#{@gateway}#{@path}") validate_no_basic_auth!(@uri) @http = Net::HTTP.new(@uri.host, @uri.port) @http.use_ssl = (@uri.scheme == 'https') @http.open_timeout = kwargs[:open_timeout] if kwargs[:open_timeout] @http.read_timeout = kwargs[:read_timeout] if kwargs[:read_timeout] end def basic_auth(user, password) @user = user @password = password end def add(registry) synchronize do request(Net::HTTP::Post, registry) end end def replace(registry) synchronize do request(Net::HTTP::Put, registry) end end def delete synchronize do request(Net::HTTP::Delete) end end private def parse(url) uri = URI.parse(url) unless SUPPORTED_SCHEMES.include?(uri.scheme) raise ArgumentError, 'only HTTP gateway URLs are supported currently.' 
end uri rescue URI::InvalidURIError => e raise ArgumentError, "#{url} is not a valid URL: #{e}" end def build_path(job, grouping_key) path = format(PATH, ERB::Util::url_encode(job)) grouping_key.each do |label, value| if value.include?('/') encoded_value = Base64.urlsafe_encode64(value) path += "/#{label}@base64/#{encoded_value}" # While it's valid for the urlsafe_encode64 function to return an # empty string when the input string is empty, it doesn't work for # our specific use case as we're putting the result into a URL path # segment. A double slash (`//`) can be normalised away by HTTP # libraries, proxies, and web servers. # # For empty strings, we use a single padding character (`=`) as the # value. # # See the pushgateway docs for more details: # # https://github.com/prometheus/pushgateway/blob/6393a901f56d4dda62cd0f6ab1f1f07c495b6354/README.md#url elsif value.empty? path += "/#{label}@base64/=" else path += "/#{label}/#{ERB::Util::url_encode(value)}" end end path end def request(req_class, registry = nil) validate_no_label_clashes!(registry) if registry req = req_class.new(@uri) req.content_type = Formats::Text::CONTENT_TYPE req.basic_auth(@user, @password) if @user req.body = Formats::Text.marshal(registry) if registry response = @http.request(req) validate_response!(response) response end def synchronize @mutex.synchronize { yield } end def validate_no_basic_auth!(uri) if uri.user || uri.password raise ArgumentError, <<~EOF Setting Basic Auth credentials in the gateway URL is not supported, please call the `basic_auth` method. Received username `#{uri.user}` in gateway URL. Instead of passing Basic Auth credentials like this: ``` push = Prometheus::Client::Push.new(job: "my-job", gateway: "http://user:password@localhost:9091") ``` please pass them like this: ``` push = Prometheus::Client::Push.new(job: "my-job", gateway: "http://localhost:9091") push.basic_auth("user", "password") ``` While URLs do support passing Basic Auth credentials using the `http://user:password@example.com/` syntax, the username and password in that syntax have to follow the usual rules for URL encoding of characters per RFC 3986 (https://datatracker.ietf.org/doc/html/rfc3986#section-2.1). Rather than place the burden of correctly performing that encoding on users of this gem, we decided to have a separate method for supplying Basic Auth credentials, with no requirement to URL encode the characters in them. EOF end end def validate_no_label_clashes!(registry) # There's nothing to check if we don't have a grouping key return if @grouping_key.empty? 
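# As a hypothetical example: pushing with grouping_key { environment: 'staging' }
# while a metric also carries its own :environment label would silently
# overwrite that label on the Pushgateway, so we raise instead.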
# We could be doing a lot of comparisons, so let's do them against a # set rather than an array grouping_key_labels = @grouping_key.keys.to_set registry.metrics.each do |metric| metric.values.keys.first.keys.each do |label| if grouping_key_labels.include?(label) raise LabelSetValidator::InvalidLabelSetError, "label :#{label} from grouping key collides with label of the " \ "same name from metric :#{metric.name} and would overwrite it" end end end end def validate_response!(response) status = Integer(response.code) if status >= 300 message = "status: #{response.code}, message: #{response.message}, body: #{response.body}" if status <= 399 raise HttpRedirectError, message elsif status <= 499 raise HttpClientError, message else raise HttpServerError, message end end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/rack/000077500000000000000000000000001442423076500235305ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/lib/prometheus/client/rack/collector.rb000066400000000000000000000047031442423076500260470ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client' module Prometheus module Client module Rack # Collector is a Rack middleware that provides a sample implementation of # a HTTP tracer. The default label builder can be modified to export a # different set of labels per recorded metric. class Collector attr_reader :app, :registry def initialize(app, options = {}, &label_builder) @app = app @registry = options[:registry] || Client.registry @label_builder = label_builder || DEFAULT_LABEL_BUILDER init_request_metrics init_exception_metrics end def call(env) # :nodoc: trace(env) { @app.call(env) } end protected DEFAULT_LABEL_BUILDER = proc do |env| { method: env['REQUEST_METHOD'].downcase, host: env['HTTP_HOST'].to_s, path: env['PATH_INFO'].to_s, } end def init_request_metrics @requests = @registry.counter( :http_requests_total, 'A counter of the total number of HTTP requests made.', ) @durations = @registry.summary( :http_request_duration_seconds, 'A summary of the response latency.', ) @durations_hist = @registry.histogram( :http_req_duration_seconds, 'A histogram of the response latency.', ) end def init_exception_metrics @exceptions = @registry.counter( :http_exceptions_total, 'A counter of the total number of exceptions raised.', ) end def trace(env) start = Time.now yield.tap do |response| duration = (Time.now - start).to_f record(labels(env, response), duration) end rescue => exception @exceptions.increment(exception: exception.class.name) raise end def labels(env, response) @label_builder.call(env).tap do |labels| labels[:code] = response.first.to_s end end def record(labels, duration) @requests.increment(labels) @durations.observe(labels, duration) @durations_hist.observe(labels, duration) rescue => exception @exceptions.increment(exception: exception.class.name) raise nil end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/rack/exporter.rb000066400000000000000000000050551442423076500257320ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client' require 'prometheus/client/formats/text' module Prometheus module Client module Rack # Exporter is a Rack middleware that provides a sample implementation of # a Prometheus HTTP client API. 
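#
# A minimal usage sketch (assuming a Rack config.ru; any downstream app
# works, the middleware only intercepts requests to its configured path):
#
#   # config.ru
#   require 'prometheus/client/rack/exporter'
#
#   use Prometheus::Client::Rack::Exporter, path: '/metrics'
#   run ->(env) { [200, { 'Content-Type' => 'text/plain' }, ['OK']] }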
class Exporter attr_reader :app, :registry, :path FORMATS = [Formats::Text].freeze FALLBACK = Formats::Text def initialize(app, options = {}) @app = app @registry = options[:registry] || Client.registry @path = options[:path] || '/metrics' @acceptable = build_dictionary(FORMATS, FALLBACK) end def call(env) if env['PATH_INFO'] == @path format = negotiate(env['HTTP_ACCEPT'], @acceptable) format ? respond_with(format) : not_acceptable(FORMATS) else @app.call(env) end end private def negotiate(accept, formats) accept = '*/*' if accept.to_s.empty? parse(accept).each do |content_type, _| return formats[content_type] if formats.key?(content_type) end nil end def parse(header) header.to_s.split(/\s*,\s*/).map do |type| attributes = type.split(/\s*;\s*/) quality = extract_quality(attributes) [attributes.join('; '), quality] end.sort_by(&:last).reverse end def extract_quality(attributes, default = 1.0) quality = default attributes.delete_if do |attr| quality = attr.split('q=').last.to_f if attr.start_with?('q=') end quality end def respond_with(format) response = if Prometheus::Client.configuration.value_class.multiprocess format.marshal_multiprocess else format.marshal end [ 200, { 'Content-Type' => format::CONTENT_TYPE }, [response], ] end def not_acceptable(formats) types = formats.map { |format| format::MEDIA_TYPE } [ 406, { 'Content-Type' => 'text/plain' }, ["Supported media types: #{types.join(', ')}"], ] end def build_dictionary(formats, fallback) formats.each_with_object('*/*' => fallback) do |format, memo| memo[format::CONTENT_TYPE] = format memo[format::MEDIA_TYPE] = format end end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/registry.rb000066400000000000000000000027321442423076500250110ustar00rootroot00000000000000# encoding: UTF-8 require 'thread' require 'prometheus/client/counter' require 'prometheus/client/summary' require 'prometheus/client/gauge' require 'prometheus/client/histogram' module Prometheus module Client # Registry class Registry class AlreadyRegisteredError < StandardError; end def initialize @metrics = {} @mutex = Mutex.new end def register(metric) name = metric.name @mutex.synchronize do if exist?(name.to_sym) raise AlreadyRegisteredError, "#{name} has already been registered" else @metrics[name.to_sym] = metric end end metric end def counter(name, docstring, base_labels = {}) register(Counter.new(name, docstring, base_labels)) end def summary(name, docstring, base_labels = {}) register(Summary.new(name, docstring, base_labels)) end def gauge(name, docstring, base_labels = {}, multiprocess_mode = :all) register(Gauge.new(name, docstring, base_labels, multiprocess_mode)) end def histogram(name, docstring, base_labels = {}, buckets = Histogram::DEFAULT_BUCKETS) register(Histogram.new(name, docstring, base_labels, buckets)) end def exist?(name) @metrics.key?(name) end def get(name) @metrics[name.to_sym] end def metrics @metrics.values end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/simple_value.rb000066400000000000000000000007071442423076500256260ustar00rootroot00000000000000require 'json' module Prometheus module Client class SimpleValue def initialize(_type, _metric_name, _name, _labels, *_args) @value = 0.0 end def set(value) @value = value end def increment(by = 1) @value += by end def decrement(by = 1) @value -= by end def get @value end def self.multiprocess false end end end end 
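# A minimal sketch of the shared value interface (the positional arguments
# are accepted for compatibility with MmapedValue and ignored here):
#
#   v = Prometheus::Client::SimpleValue.new(:counter, :foo, :foo, {})
#   v.increment
#   v.increment(2.5)
#   v.get # => 3.5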
prometheus-client-mmap-v0.23.1/lib/prometheus/client/summary.rb000066400000000000000000000032241442423076500246330ustar00rootroot00000000000000require 'prometheus/client/metric' require 'prometheus/client/uses_value_type' module Prometheus module Client # Summary is an accumulator for samples. It captures Numeric data and # provides an efficient quantile calculation mechanism. class Summary < Metric extend Gem::Deprecate # Value represents the state of a Summary at a given point. class Value < Hash include UsesValueType attr_accessor :sum, :total def initialize(type, name, labels) @sum = value_object(type, name, "#{name}_sum", labels) @total = value_object(type, name, "#{name}_count", labels) end def observe(value) @sum.increment(value) @total.increment end end def initialize(name, docstring, base_labels = {}) super(name, docstring, base_labels) end def type :summary end # Records a given value. def observe(labels, value) label_set = label_set_for(labels) synchronize { @values[label_set].observe(value) } end alias add observe deprecate :add, :observe, 2016, 10 # Returns the value for the given label set def get(labels = {}) @validator.valid?(labels) synchronize do @values[labels].sum.get end end # Returns all label sets with their values def values synchronize do @values.each_with_object({}) do |(labels, value), memo| memo[labels] = value.sum end end end private def default(labels) Value.new(type, @name, labels) end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/support/000077500000000000000000000000001442423076500243245ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/lib/prometheus/client/support/unicorn.rb000066400000000000000000000014241442423076500263270ustar00rootroot00000000000000module Prometheus module Client module Support module Unicorn def self.worker_pid_provider wid = worker_id if wid.nil? "process_id_#{Process.pid}" else "worker_id_#{wid}" end end def self.worker_id match = $0.match(/worker\[([^\]]+)\]/) if match match[1] else object_based_worker_id end end def self.object_based_worker_id return unless defined?(::Unicorn::Worker) workers = ObjectSpace.each_object(::Unicorn::Worker) return if workers.nil? workers_first = workers.first workers_first.nr unless workers_first.nil? 
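# (Unicorn::Worker#nr is the zero-based worker number Unicorn assigns at fork time)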
end end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/uses_value_type.rb000066400000000000000000000013331442423076500263510ustar00rootroot00000000000000require 'prometheus/client/simple_value' module Prometheus module Client # Module providing convenience methods for creating value_object module UsesValueType def value_class Prometheus::Client.configuration.value_class end def value_object(type, metric_name, name, labels, *args) value_class.new(type, metric_name, name, labels, *args) rescue StandardError => e Prometheus::Client.logger.info("error #{e} while creating instance of #{value_class} defaulting to SimpleValue") Prometheus::Client.logger.debug("error #{e} backtrace #{e.backtrace.join("\n")}") Prometheus::Client::SimpleValue.new(type, metric_name, name, labels) end end end end prometheus-client-mmap-v0.23.1/lib/prometheus/client/version.rb000066400000000000000000000001121442423076500246140ustar00rootroot00000000000000module Prometheus module Client VERSION = '0.23.1'.freeze end end prometheus-client-mmap-v0.23.1/prometheus-client-mmap.gemspec000066400000000000000000000026231442423076500243400ustar00rootroot00000000000000#encoding: utf-8 $LOAD_PATH.push File.expand_path('../lib', __FILE__) require 'prometheus/client/version' Gem::Specification.new do |s| s.name = 'prometheus-client-mmap' s.version = Prometheus::Client::VERSION s.summary = 'A suite of instrumentation metric primitives' \ 'that can be exposed through a web services interface.' s.authors = ['Tobias Schmidt', 'PaweÅ‚ Chojnacki', 'Stan Hu', 'Will Chandler'] s.email = ['ts@soundcloud.com', 'pawel@gitlab.com', 'stanhu@gmail.com', 'wchandler@gitlab.com'] s.homepage = 'https://gitlab.com/gitlab-org/prometheus-client-mmap' s.license = 'Apache-2.0' s.files = `git ls-files README.md .tool-versions lib ext vendor`.split("\n") s.require_paths = ['lib'] s.extensions = Dir.glob('{ext/**/extconf.rb}') # This C extension uses ObjectSpace::WeakRef with Integer keys (https://bugs.ruby-lang.org/issues/16035) s.required_ruby_version = '>= 2.7.0' s.add_dependency "rb_sys", "~> 0.9" s.add_development_dependency 'fuzzbert', '~> 1.0', '>= 1.0.4' s.add_development_dependency 'gem_publisher', '~> 1' s.add_development_dependency 'pry', '~> 0.12.2' s.add_development_dependency "rake-compiler", "~> 1.2.1" s.add_development_dependency "rake-compiler-dock", "~> 1.3.0" s.add_development_dependency 'ruby-prof', '~> 0.16.2' end prometheus-client-mmap-v0.23.1/spec/000077500000000000000000000000001442423076500173235ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/examples/000077500000000000000000000000001442423076500211415ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/examples/metric_example.rb000066400000000000000000000022771442423076500244740ustar00rootroot00000000000000# encoding: UTF-8 shared_examples_for Prometheus::Client::Metric do subject { described_class.new(:foo, 'foo description') } describe '.new' do it 'returns a new metric' do expect(subject).to be end it 'raises an exception if a reserved base label is used' do exception = Prometheus::Client::LabelSetValidator::ReservedLabelError expect do described_class.new(:foo, 'foo docstring', __name__: 'reserved') end.to raise_exception exception end it 'raises an exception if the given name is blank' do expect do described_class.new(nil, 'foo') end.to raise_exception ArgumentError end it 'raises an exception if docstring is missing' do expect do described_class.new(:foo, '') end.to raise_exception ArgumentError end end describe '#type' do it 
'returns the metric type as symbol' do expect(subject.type).to be_a(Symbol) end end describe '#get' do it 'returns the current metric value' do expect(subject.get).to be_a(type) end it 'returns the current metric value for a given label set' do expect(subject.get(test: 'label')).to be_a(type) end end end prometheus-client-mmap-v0.23.1/spec/prometheus/000077500000000000000000000000001442423076500215165ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/prometheus/client/000077500000000000000000000000001442423076500227745ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/prometheus/client/counter_spec.rb000066400000000000000000000033631442423076500260170ustar00rootroot00000000000000require 'prometheus/client/counter' require 'prometheus/client' require 'examples/metric_example' describe Prometheus::Client::Counter do before do allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return('tmp/') end let(:counter) { Prometheus::Client::Counter.new(:foo, 'foo description') } it_behaves_like Prometheus::Client::Metric do let(:type) { Float } end describe 'Memory Error tests' do it "creating many counters shouldn't cause a SIGBUS" do 4.times do |j| 9999.times do |i| counter = Prometheus::Client::Counter.new("foo#{j}_z#{i}".to_sym, 'some string') counter.increment end GC.start end end end describe '#increment' do it 'increments the counter' do expect { counter.increment }.to change { counter.get }.by(1) end it 'increments the counter for a given label set' do expect do expect do counter.increment(test: 'label') end.to change { counter.get(test: 'label') }.by(1) end.to_not change { counter.get(test: 'other_label') } end it 'increments the counter by a given value' do expect do counter.increment({}, 5) end.to change { counter.get }.by(5) end it 'raises an ArgumentError on negative increments' do expect do counter.increment({}, -1) end.to raise_error ArgumentError end it 'returns the new counter value' do expect(counter.increment).to eql(counter.get) end it 'is thread safe' do expect do Array.new(10) do Thread.new do 10.times { counter.increment } end end.each(&:join) end.to change { counter.get }.by(100) end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/formats/000077500000000000000000000000001442423076500244475ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/prometheus/client/formats/text_spec.rb000066400000000000000000000212151442423076500267730ustar00rootroot00000000000000require 'spec_helper' require 'prometheus/client/formats/text' require 'prometheus/client/mmaped_value' describe Prometheus::Client::Formats::Text do context 'single process metrics' do let(:value_class) { Prometheus::Client::SimpleValue } let(:summary_value) do { 0.5 => 4.2, 0.9 => 8.32, 0.99 => 15.3 }.tap do |value| allow(value).to receive_messages(sum: 1243.21, total: 93) end end let(:histogram_value) do { 10 => 1, 20 => 2, 30 => 2 }.tap do |value| allow(value).to receive_messages(sum: 15.2, total: 2) end end let(:registry) do metrics = [ double( name: :foo, docstring: 'foo description', base_labels: { umlauts: 'Björn', utf: 'ä½–ä½¥' }, type: :counter, values: { { code: 'red' } => 42, { code: 'green' } => 3.14E42, { code: 'blue' } => -1.23e-45, }, ), double( name: :bar, docstring: "bar description\nwith newline", base_labels: { status: 'success' }, type: :gauge, values: { { code: 'pink' } => 15, }, ), double( name: :baz, docstring: 'baz "description" \\escaping', base_labels: {}, type: :counter, values: { { text: "with \"quotes\", \\escape \n 
and newline" } => 15, }, ), double( name: :qux, docstring: 'qux description', base_labels: { for: 'sake' }, type: :summary, values: { { code: '1' } => summary_value, }, ), double( name: :xuq, docstring: 'xuq description', base_labels: {}, type: :histogram, values: { { code: 'ah' } => histogram_value, }, ), ] metrics.each do |m| m.values.each do |k, v| m.values[k] = value_class.new(m.type, m.name, m.name, k) m.values[k].set(v) end end double(metrics: metrics) end describe '.marshal' do it 'returns a Text format version 0.0.4 compatible representation' do expect(subject.marshal(registry)).to eql <<-'TEXT'.gsub(/^\s+/, '') # HELP foo foo description # TYPE foo counter foo{umlauts="Björn",utf="ä½–ä½¥",code="red"} 42 foo{umlauts="Björn",utf="ä½–ä½¥",code="green"} 3.14e+42 foo{umlauts="Björn",utf="ä½–ä½¥",code="blue"} -1.23e-45 # HELP bar bar description\nwith newline # TYPE bar gauge bar{status="success",code="pink"} 15 # HELP baz baz "description" \\escaping # TYPE baz counter baz{text="with \"quotes\", \\escape \n and newline"} 15 # HELP qux qux description # TYPE qux summary qux{for="sake",code="1",quantile="0.5"} 4.2 qux{for="sake",code="1",quantile="0.9"} 8.32 qux{for="sake",code="1",quantile="0.99"} 15.3 qux_sum{for="sake",code="1"} 1243.21 qux_count{for="sake",code="1"} 93 # HELP xuq xuq description # TYPE xuq histogram xuq{code="ah",le="10"} 1 xuq{code="ah",le="20"} 2 xuq{code="ah",le="30"} 2 xuq{code="ah",le="+Inf"} 2 xuq_sum{code="ah"} 15.2 xuq_count{code="ah"} 2 TEXT end end end context 'multi process metrics', :temp_metrics_dir do let(:registry) { Prometheus::Client::Registry.new } before do allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return(temp_metrics_dir) # reset all current metrics Prometheus::Client::MmapedValue.class_variable_set(:@@files, {}) end context 'pid provider returns compound ID', :temp_metrics_dir, :sample_metrics do before do allow(Prometheus::Client.configuration).to receive(:pid_provider).and_return(-> { 'pid_provider_id_1' }) # Prometheus::Client::MmapedValue.class_variable_set(:@@files, {}) add_simple_metrics(registry) end it '.marshal_multiprocess' do expect(described_class.marshal_multiprocess(temp_metrics_dir, use_rust: true)).to eq <<-'TEXT'.gsub(/^\s+/, '') # HELP counter Multiprocess metric # TYPE counter counter counter{a="1",b="1"} 1 counter{a="1",b="2"} 1 counter{a="2",b="1"} 1 # HELP gauge Multiprocess metric # TYPE gauge gauge gauge{b="1"} 1 gauge{b="2"} 1 # HELP gauge_with_big_value Multiprocess metric # TYPE gauge_with_big_value gauge gauge_with_big_value{a="0.12345678901234566"} 0.12345678901234566 gauge_with_big_value{a="12345678901234567"} 12345678901234568 # HELP gauge_with_null_labels Multiprocess metric # TYPE gauge_with_null_labels gauge gauge_with_null_labels{a="",b=""} 1 # HELP gauge_with_pid Multiprocess metric # TYPE gauge_with_pid gauge gauge_with_pid{b="1",c="1",pid="pid_provider_id_1"} 1 # HELP histogram Multiprocess metric # TYPE histogram histogram histogram_bucket{a="1",le="+Inf"} 1 histogram_bucket{a="1",le="0.005"} 0 histogram_bucket{a="1",le="0.01"} 0 histogram_bucket{a="1",le="0.025"} 0 histogram_bucket{a="1",le="0.05"} 0 histogram_bucket{a="1",le="0.1"} 0 histogram_bucket{a="1",le="0.25"} 0 histogram_bucket{a="1",le="0.5"} 0 histogram_bucket{a="1",le="1"} 1 histogram_bucket{a="1",le="10"} 1 histogram_bucket{a="1",le="2.5"} 1 histogram_bucket{a="1",le="5"} 1 histogram_count{a="1"} 1 histogram_sum{a="1"} 1 # HELP summary Multiprocess metric # TYPE summary summary summary_count{a="1",b="1"} 1 
summary_sum{a="1",b="1"} 1 TEXT end end context 'pid provider returns numerical value', :temp_metrics_dir, :sample_metrics do before do allow(Prometheus::Client.configuration).to receive(:pid_provider).and_return(-> { -1 }) add_simple_metrics(registry) end it '.marshal_multiprocess' do expect(described_class.marshal_multiprocess(temp_metrics_dir, use_rust: true)).to eq <<-'TEXT'.gsub(/^\s+/, '') # HELP counter Multiprocess metric # TYPE counter counter counter{a="1",b="1"} 1 counter{a="1",b="2"} 1 counter{a="2",b="1"} 1 # HELP gauge Multiprocess metric # TYPE gauge gauge gauge{b="1"} 1 gauge{b="2"} 1 # HELP gauge_with_big_value Multiprocess metric # TYPE gauge_with_big_value gauge gauge_with_big_value{a="0.12345678901234566"} 0.12345678901234566 gauge_with_big_value{a="12345678901234567"} 12345678901234568 # HELP gauge_with_null_labels Multiprocess metric # TYPE gauge_with_null_labels gauge gauge_with_null_labels{a="",b=""} 1 # HELP gauge_with_pid Multiprocess metric # TYPE gauge_with_pid gauge gauge_with_pid{b="1",c="1",pid="-1"} 1 # HELP histogram Multiprocess metric # TYPE histogram histogram histogram_bucket{a="1",le="+Inf"} 1 histogram_bucket{a="1",le="0.005"} 0 histogram_bucket{a="1",le="0.01"} 0 histogram_bucket{a="1",le="0.025"} 0 histogram_bucket{a="1",le="0.05"} 0 histogram_bucket{a="1",le="0.1"} 0 histogram_bucket{a="1",le="0.25"} 0 histogram_bucket{a="1",le="0.5"} 0 histogram_bucket{a="1",le="1"} 1 histogram_bucket{a="1",le="10"} 1 histogram_bucket{a="1",le="2.5"} 1 histogram_bucket{a="1",le="5"} 1 histogram_count{a="1"} 1 histogram_sum{a="1"} 1 # HELP summary Multiprocess metric # TYPE summary summary summary_count{a="1",b="1"} 1 summary_sum{a="1",b="1"} 1 TEXT end end context 'when OJ is available uses OJ to parse keys' do let(:oj) { double(oj) } before do stub_const 'Oj', oj allow(oj).to receive(:load) end end context 'with metric having whitespace and UTF chars', :temp_metrics_dir do before do registry.gauge(:gauge, "bar description\nwith newline", { umlauts: 'Björn', utf: 'ä½–ä½¥' }, :all).set({ umlauts: 'Björn', utf: 'ä½–ä½¥' }, 1) end xit '.marshall_multiprocess' do expect(described_class.marshal_multiprocess(temp_metrics_dir, use_rust: true)).to eq <<-'TEXT'.gsub(/^\s+/, '') TODO... 
TEXT end end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/gauge_spec.rb000066400000000000000000000034451442423076500254310ustar00rootroot00000000000000require 'prometheus/client' require 'prometheus/client/gauge' require 'examples/metric_example' describe Prometheus::Client::Gauge do let(:gauge) { Prometheus::Client::Gauge.new(:foo, 'foo description', test: nil) } before do allow(Prometheus::Client.configuration).to receive(:value_class).and_return(Prometheus::Client::SimpleValue) end it_behaves_like Prometheus::Client::Metric do let(:type) { Float } end describe '#set' do it 'sets a metric value' do expect do gauge.set({}, 42) end.to change { gauge.get }.from(0).to(42) end it 'sets a metric value for a given label set' do expect do expect do gauge.set({ test: 'value' }, 42) end.to(change { gauge.get(test: 'value') }.from(0).to(42)) end.to_not(change { gauge.get }) end end describe '#increment' do it 'increments a metric value' do gauge.set({}, 1) expect do gauge.increment({}, 42) end.to change { gauge.get }.from(1).to(43) end it 'sets a metric value for a given label set' do gauge.increment({ test: 'value' }, 1) expect do expect do gauge.increment({ test: 'value' }, 42) end.to(change { gauge.get(test: 'value') }.from(1).to(43)) end.to_not(change { gauge.get }) end end describe '#decrement' do it 'decrements a metric value' do gauge.set({}, 10) expect do gauge.decrement({}, 1) end.to change { gauge.get }.from(10).to(9) end it 'sets a metric value for a given label set' do gauge.set({ test: 'value' }, 10) expect do expect do gauge.decrement({ test: 'value' }, 5) end.to(change { gauge.get(test: 'value') }.from(10).to(5)) end.to_not(change { gauge.get }) end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/helpers/000077500000000000000000000000001442423076500244365ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/prometheus/client/helpers/json_parser_spec.rb000066400000000000000000000014261442423076500303250ustar00rootroot00000000000000require 'spec_helper' require 'oj' require 'prometheus/client/helper/json_parser' describe Prometheus::Client::Helper::JsonParser do describe '.load' do let(:input) { %({ "a": 1 }) } shared_examples 'JSON parser' do it 'parses JSON' do expect(described_class.load(input)).to eq({ 'a' => 1 }) end it 'raises JSON::ParserError' do expect { described_class.load("{false}") }.to raise_error(JSON::ParserError) end end context 'with Oj' do it_behaves_like 'JSON parser' end context 'without Oj' do before(:all) do Object.send(:remove_const, 'Oj') load File.join(__dir__, "../../../../lib/prometheus/client/helper/json_parser.rb") end it_behaves_like 'JSON parser' end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/helpers/mmaped_file_spec.rb000066400000000000000000000073351442423076500302470ustar00rootroot00000000000000require 'spec_helper' require 'prometheus/client/helper/mmaped_file' require 'prometheus/client/page_size' describe Prometheus::Client::Helper::MmapedFile do let(:filename) { Dir::Tmpname.create('mmaped_file_') {} } after do File.delete(filename) if File.exist?(filename) end describe '.open' do it 'initialize PRIVATE mmaped file read only' do expect(described_class).to receive(:new).with(filename).and_call_original expect(described_class.open(filename)).to be_instance_of(described_class) end end context 'file does not exist' do let (:subject) { described_class.open(filename) } it 'creates and initializes file correctly' do expect(File.exist?(filename)).to be_falsey subject 
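# the lazily-evaluated subject above invoked described_class.open, creating the file on disk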
expect(File.exist?(filename)).to be_truthy end it 'creates a file with minimum initial size' do expect(File.size(subject.filepath)).to eq(subject.send(:initial_mmap_file_size)) end context 'when initial mmap size is larger' do let(:page_size) { Prometheus::Client::PageSize.page_size } let (:initial_mmap_file_size) { page_size + 1024 } before do allow_any_instance_of(described_class).to receive(:initial_mmap_file_size).and_return(initial_mmap_file_size) end it 'creates a file with increased minimum initial size' do expect(File.size(subject.filepath)).to eq(page_size * 2); end end end describe '.ensure_exclusive_file' do let(:tmpdir) { Dir.mktmpdir('mmaped_file') } let(:pid) { 'pid' } before do allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return(tmpdir) allow(Prometheus::Client.configuration).to receive(:pid_provider).and_return(pid.method(:to_s)) end context 'when no files are already locked' do it 'provides first possible filename' do expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-0\.db/) end it 'provides first and second possible filenames for two invocations' do expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-0\.db/) expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-1\.db/) end end context 'when first possible file exists for current file ID' do let(:first_mmaped_file) { described_class.ensure_exclusive_file('mmaped_file') } before do first_mmaped_file end context 'first file is unlocked' do before do Prometheus::Client::Helper::FileLocker.unlock(first_mmaped_file) end it 'provides first possible filename discarding the lock' do expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-0\.db/) end it 'provides second possible filename for second invocation' do expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-0\.db/) expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-1\.db/) end end context 'first file is not unlocked' do it 'provides second possible filename' do expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-1\.db/) end it 'provides second and third possible filename for two invocations' do expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-1\.db/) expect(described_class.ensure_exclusive_file('mmaped_file')) .to match(/.*mmaped_file_pid-2\.db/) end end end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/histogram_spec.rb000066400000000000000000000044611442423076500263350ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client' require 'prometheus/client/histogram' require 'examples/metric_example' describe Prometheus::Client::Histogram do before do allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return('tmp/') end let(:histogram) do described_class.new(:bar, 'bar description', {}, [2.5, 5, 10]) end it_behaves_like Prometheus::Client::Metric do let(:type) { Hash } end describe '#initialization' do it 'raise error for unsorted buckets' do expect do described_class.new(:bar, 'bar description', {}, [5, 2.5, 10]) end.to raise_error ArgumentError end it 'raise error for accidentally missing out an argument' do expect do described_class.new(:bar, 'bar description', [5, 2.5, 10]) end.to raise_error Prometheus::Client::LabelSetValidator::InvalidLabelSetError end end describe '#observe' 
do it 'records the given value' do expect do histogram.observe({}, 5) end.to change { histogram.get } end xit 'raise error for le labels' do expect do histogram.observe({ le: 1 }, 5) end.to raise_error ArgumentError end end describe '#get' do before do histogram.observe({ foo: 'bar' }, 3) histogram.observe({ foo: 'bar' }, 5.2) histogram.observe({ foo: 'bar' }, 13) histogram.observe({ foo: 'bar' }, 4) end xit 'returns a set of buckets values' do expect(histogram.get(foo: 'bar')).to eql(2.5 => 0, 5 => 2, 10 => 3) end xit 'returns a value which responds to #sum and #total' do value = histogram.get(foo: 'bar') expect(value.sum).to eql(25.2) expect(value.total).to eql(4) expect(value.total_inf).to eql(4) end xit 'uses zero as default value' do expect(histogram.get({})).to eql(2.5 => 0, 5 => 0, 10 => 0) end end xdescribe '#values' do it 'returns a hash of all recorded summaries' do histogram.observe({ status: 'bar' }, 3) histogram.observe({ status: 'foo' }, 6) expect(histogram.values).to eql( { status: 'bar' } => { 2.5 => 0, 5 => 1, 10 => 1 }, { status: 'foo' } => { 2.5 => 0, 5 => 0, 10 => 1 }, ) end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/label_set_validator_spec.rb000066400000000000000000000045161442423076500303400ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client/label_set_validator' describe Prometheus::Client::LabelSetValidator do let(:validator) { Prometheus::Client::LabelSetValidator.new } let(:invalid) { Prometheus::Client::LabelSetValidator::InvalidLabelSetError } describe '.new' do it 'returns an instance of a LabelSetValidator' do expect(validator).to be_a(Prometheus::Client::LabelSetValidator) end end describe '#valid?' do it 'returns true for a valid label check' do expect(validator.valid?(version: 'alpha')).to eql(true) end it 'raises InvalidLabelError if a label value is an array' do expect do validator.valid?(version: [1, 2, 3]) end.to raise_exception(described_class::InvalidLabelError) end it 'raises Invaliddescribed_classError if a label set is not a hash' do expect do validator.valid?('invalid') end.to raise_exception invalid end it 'raises InvalidLabelError if a label key is not a symbol' do expect do validator.valid?('key' => 'value') end.to raise_exception(described_class::InvalidLabelError) end it 'raises InvalidLabelError if a label key starts with __' do expect do validator.valid?(__reserved__: 'key') end.to raise_exception(described_class::ReservedLabelError) end it 'raises ReservedLabelError if a label key is reserved' do [:job, :instance].each do |label| expect do validator.valid?(label => 'value') end.to raise_exception(described_class::ReservedLabelError) end end end describe '#validate' do it 'returns a given valid label set' do hash = { version: 'alpha' } expect(validator.validate(hash)).to eql(hash) end it 'raises an exception if a given label set is not valid' do input = 'broken' expect(validator).to receive(:valid?).with(input).and_raise(invalid) expect { validator.validate(input) }.to raise_exception(invalid) end it 'raises InvalidLabelSetError for varying label sets' do validator.validate(method: 'get', code: '200') expect do validator.validate(method: 'get', exception: 'NoMethodError') end.to raise_exception(invalid, "labels must have the same signature: (expected keys: [:code, :method], got: [:exception, :method])") end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/mmaped_dict_spec.rb000066400000000000000000000137601442423076500266100ustar00rootroot00000000000000require 
'prometheus/client/mmaped_dict' require 'prometheus/client/page_size' require 'tempfile' describe Prometheus::Client::MmapedDict do let(:tmp_file) { Tempfile.new('mmaped_dict') } let(:tmp_mmaped_file) { Prometheus::Client::Helper::MmapedFile.open(tmp_file.path) } after do tmp_mmaped_file.close tmp_file.close tmp_file.unlink end describe '#initialize' do describe "empty mmap'ed file" do it 'is initialized with correct size' do described_class.new(tmp_mmaped_file) expect(File.size(tmp_file.path)).to eq(tmp_mmaped_file.send(:initial_mmap_file_size)) end end describe "mmap'ed file that is above minimum size" do let(:above_minimum_size) { Prometheus::Client::Helper::EntryParser::MINIMUM_SIZE + 1 } let(:page_size) { Prometheus::Client::PageSize.page_size } before do tmp_file.truncate(above_minimum_size) end it 'is initialized with a page size' do described_class.new(tmp_mmaped_file) tmp_file.open expect(tmp_file.size).to eq(page_size) end end end describe 'read on boundary conditions' do let(:locked_file) { Prometheus::Client::Helper::MmapedFile.ensure_exclusive_file } let(:mmaped_file) { Prometheus::Client::Helper::MmapedFile.open(locked_file) } let(:page_size) { Prometheus::Client::PageSize.page_size } let(:target_size) { page_size } let(:iterations) { page_size / 32 } let(:dummy_key) { '1234' } let(:dummy_value) { 1.0 } let(:expected) { { dummy_key => dummy_value } } before do Prometheus::Client.configuration.multiprocess_files_dir = Dir.tmpdir data = described_class.new(Prometheus::Client::Helper::MmapedFile.open(locked_file)) # This test exercises the case when the value ends on the last byte. # To generate a file like this, we create entries that require 32 bytes # total to store with 7 bytes of padding at the end. # # To make things align evenly against the system page size, add a dummy # entry that will occupy the next 3 bytes to start on a 32-byte boundary. # The file structure looks like: # # Bytes 0-3 : Total used size of file # Bytes 4-7 : Padding # Bytes 8-11 : Length of '1234' (4) # Bytes 12-15: '1234' # Bytes 24-31: 1.0 # Bytes 32-35: Length of '1000000000000' (13) # Bytes 36-48: '1000000000000' # Bytes 49-55: Padding # Bytes 56-63: 0.0 # Bytes 64-67: Length of '1000000000001' (13) # Bytes 68-80: '1000000000001' # Bytes 81-87: Padding # Bytes 88-95: 1.0 # ...
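# Each 13-byte-key entry above therefore spans 32 bytes:
#   4 (length) + 13 (key) + 7 (padding to the next 8-byte boundary) + 8 (double) = 32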
data.write_value(dummy_key, dummy_value) (1..iterations - 1).each do |i| # Using a 13-byte string text = (1000000000000 + i).to_s expected[text] = i.to_f data.write_value(text, i) end data.close end it '#read_all_values' do values = described_class.read_all_values(locked_file) expect(values.count).to eq(iterations) expect(values).to match_array(expected.to_a) end end describe 'read and write values' do let(:locked_file) { Prometheus::Client::Helper::MmapedFile.ensure_exclusive_file } let(:mmaped_file) { Prometheus::Client::Helper::MmapedFile.open(locked_file) } before do Prometheus::Client.configuration.multiprocess_files_dir = Dir.tmpdir data = described_class.new(Prometheus::Client::Helper::MmapedFile.open(locked_file)) data.write_value('foo', 100) data.write_value('bar', 500) data.close end after do mmaped_file.close if File.exist?(mmaped_file.filepath) Prometheus::Client::Helper::FileLocker.unlock(locked_file) if File.exist?(mmaped_file.filepath) File.unlink(locked_file) if File.exist?(mmaped_file.filepath) end it '#inspect' do data = described_class.new(Prometheus::Client::Helper::MmapedFile.open(locked_file)) expect(data.inspect).to match(/#{described_class}:0x/) expect(data.inspect).not_to match(/@position/) end it '#read_all_values' do values = described_class.read_all_values(locked_file) expect(values.count).to eq(2) expect(values[0]).to eq(['foo', 100]) expect(values[1]).to eq(['bar', 500]) end it '#read_all_positions' do data = described_class.new(Prometheus::Client::Helper::MmapedFile.open(locked_file)) positions = data.positions # Generated via https://github.com/luismartingarcia/protocol: # protocol "Used:4,Pad:4,K1 Size:4,K1 Name:4,K1 Value:8,K2 Size:4,K2 Name:4,K2 Value:8" # # 0 1 2 3 # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | Used | Pad |K1 Size|K1 Name| K1 Value |K2 Size|K2 Name| # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ # | K2 Value | # +-+-+-+-+-+-+-+ expect(positions).to eq({ 'foo' => 16, 'bar' => 32 }) end describe '#write_value' do it 'writes values' do # Reload dictionary # data = described_class.new(mmaped_file) data.write_value('new value', 500) # Overwrite existing values data.write_value('foo', 200) data.write_value('bar', 300) values = described_class.read_all_values(locked_file) expect(values.count).to eq(3) expect(values[0]).to eq(['foo', 200]) expect(values[1]).to eq(['bar', 300]) expect(values[2]).to eq(['new value', 500]) end context 'when mmaped_file got deleted' do it 'is able to write to and expand metrics file' do data = described_class.new(mmaped_file) data.write_value('new value', 500) FileUtils.rm(mmaped_file.filepath) 1000.times do |i| data.write_value("new new value #{i}", 567) end expect(File.exist?(locked_file)).not_to be_truthy end end end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/mmaped_value_spec.rb000066400000000000000000000125201442423076500267720ustar00rootroot00000000000000require 'prometheus/client/mmaped_dict' require 'prometheus/client/page_size' require 'tempfile' describe Prometheus::Client::MmapedValue, :temp_metrics_dir do before do allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return(temp_metrics_dir) end describe '.reset_and_reinitialize' do let(:counter) { described_class.new(:counter, :counter, 'counter', {}) } before do counter.increment(1) end it 'calls reinitialize on the counter' do expect(counter).to 
receive(:unsafe_reinitialize_file).with(false).and_call_original described_class.reset_and_reinitialize end context 'when metrics folder changes' do around do |example| Dir.mktmpdir('temp_metrics_dir') do |path| @tmp_path = path example.run end end before do allow(Prometheus::Client.configuration).to receive(:multiprocess_files_dir).and_return(@tmp_path) end it 'resets the counter to zero' do expect(counter).to receive(:unsafe_reinitialize_file).with(false).and_call_original expect { described_class.reset_and_reinitialize }.to(change { counter.get }.from(1).to(0)) end end end describe '#initialize' do let(:pid) { 1234 } before do described_class.class_variable_set(:@@files, {}) described_class.class_variable_set(:@@pid, pid) allow(Prometheus::Client.configuration).to receive(:pid_provider).and_return(-> { pid }) allow(Process).to receive(:pid).and_return(pid) end describe 'counter type object initialized' do let!(:counter) { described_class.new(:counter, :counter, 'counter', {}) } describe 'PID unchanged' do it 'initializing gauge MmapValue object type keeps old file data' do described_class.new(:gauge, :gauge, 'gauge', {}, :all) expect(described_class.class_variable_get(:@@files)).to have_key('counter') expect(described_class.class_variable_get(:@@files)).to have_key('gauge_all') end end describe 'PID changed' do let(:new_pid) { pid - 1 } let(:page_size) { Prometheus::Client::PageSize.page_size } before do counter.increment @old_value = counter.get allow(Prometheus::Client.configuration).to receive(:pid_provider).and_return(-> { new_pid }) allow(Process).to receive(:pid).and_return(new_pid) end it 'initializing gauge MmapValue object type keeps old file data' do described_class.new(:gauge, :gauge, 'gauge', {}, :all) expect(described_class.class_variable_get(:@@files)).not_to have_key('counter') expect(described_class.class_variable_get(:@@files)).to have_key('gauge_all') end it 'updates pid' do expect { described_class.new(:gauge, :gauge, 'gauge', {}, :all) } .to change { described_class.class_variable_get(:@@pid) }.from(pid).to(new_pid) end it '#increment updates pid' do expect { counter.increment } .to change { described_class.class_variable_get(:@@pid) }.from(pid).to(new_pid) end it '#increment updates pid' do expect { counter.increment } .to change { described_class.class_variable_get(:@@pid) }.from(pid).to(new_pid) end it '#get updates pid' do expect { counter.get } .to change { described_class.class_variable_get(:@@pid) }.from(pid).to(new_pid) end it '#set updates pid' do expect { counter.set(1) } .to change { described_class.class_variable_get(:@@pid) }.from(pid).to(new_pid) end it '#set logs an error' do counter.set(1) allow(counter.instance_variable_get(:@file)) .to receive(:write_value) .and_raise('error writing value') expect(Prometheus::Client.logger).to receive(:warn).and_call_original counter.set(1) end it 'reinitialize restores all used file references and resets data' do described_class.new(:gauge, :gauge, 'gauge', {}, :all) described_class.reinitialize_on_pid_change expect(described_class.class_variable_get(:@@files)).to have_key('counter') expect(described_class.class_variable_get(:@@files)).to have_key('gauge_all') expect(counter.get).not_to eq(@old_value) end it 'updates strings properly upon memory expansion', :page_size do described_class.new(:gauge, :gauge, 'gauge2', { label_1: 'x' * page_size * 2 }, :all) # This previously failed on Linux but not on macOS since mmap() may re-allocate the same region. ObjectSpace.each_object(String, &:valid_encoding?) 
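# (valid_encoding? reads every String's bytes, so a String still pointing into the old mapping would crash here rather than pass silently)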
end end context 'different label ordering' do it 'does not care about label ordering' do counter1 = described_class.new(:counter, :counter, 'ordered_counter', { label_1: 'hello', label_2: 'world', label_3: 'baz' }).increment counter2 = described_class.new(:counter, :counter, 'ordered_counter', { label_2: 'world', label_3: 'baz', label_1: 'hello' }).increment reading_counter = described_class.new(:counter, :counter, 'ordered_counter', { label_3: 'baz', label_1: 'hello', label_2: 'world' }) expect(reading_counter.get).to eq(2) end end end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/push_spec.rb000066400000000000000000000255721442423076500253250ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client/gauge' require 'prometheus/client/push' describe Prometheus::Client::Push do let(:gateway) { 'http://localhost:9091' } let(:registry) { Prometheus::Client::Registry.new } let(:grouping_key) { {} } let(:push) { Prometheus::Client::Push.new(job: 'test-job', gateway: gateway, grouping_key: grouping_key, open_timeout: 5, read_timeout: 30) } describe '.new' do it 'returns a new push instance' do expect(push).to be_a(Prometheus::Client::Push) end it 'uses localhost as default Pushgateway' do push = Prometheus::Client::Push.new(job: 'test-job') expect(push.gateway).to eql('http://localhost:9091') end it 'allows to specify a custom Pushgateway' do push = Prometheus::Client::Push.new(job: 'test-job', gateway: 'http://pu.sh:1234') expect(push.gateway).to eql('http://pu.sh:1234') end it 'raises an ArgumentError if the job is nil' do expect do Prometheus::Client::Push.new(job: nil) end.to raise_error ArgumentError end it 'raises an ArgumentError if the job is empty' do expect do Prometheus::Client::Push.new(job: "") end.to raise_error ArgumentError end it 'raises an ArgumentError if the given gateway URL is invalid' do ['inva.lid:1233', 'http://[invalid]'].each do |url| expect do Prometheus::Client::Push.new(job: 'test-job', gateway: url) end.to raise_error ArgumentError end end it 'raises InvalidLabelError if a grouping key label has an invalid name' do expect do Prometheus::Client::Push.new(job: "test-job", grouping_key: { "not_a_symbol" => "foo" }) end.to raise_error Prometheus::Client::LabelSetValidator::InvalidLabelError end end describe '#add' do it 'sends a given registry to via HTTP POST' do expect(push).to receive(:request).with(Net::HTTP::Post, registry) push.add(registry) end end describe '#replace' do it 'sends a given registry to via HTTP PUT' do expect(push).to receive(:request).with(Net::HTTP::Put, registry) push.replace(registry) end end describe '#delete' do it 'deletes existing metrics with HTTP DELETE' do expect(push).to receive(:request).with(Net::HTTP::Delete) push.delete end end describe '#path' do it 'uses the default metrics path if no grouping key given' do push = Prometheus::Client::Push.new(job: 'test-job') expect(push.path).to eql('/metrics/job/test-job') end it 'appends additional grouping labels to the path if specified' do push = Prometheus::Client::Push.new( job: 'test-job', grouping_key: { foo: "bar", baz: "qux"}, ) expect(push.path).to eql('/metrics/job/test-job/foo/bar/baz/qux') end it 'encodes grouping key label values containing `/` in url-safe base64' do push = Prometheus::Client::Push.new( job: 'test-job', grouping_key: { foo: "bar/baz"}, ) expect(push.path).to eql('/metrics/job/test-job/foo@base64/YmFyL2Jheg==') end it 'encodes empty grouping key label values as a single base64 padding character' do push = 
Prometheus::Client::Push.new( job: 'test-job', grouping_key: { foo: ""}, ) expect(push.path).to eql('/metrics/job/test-job/foo@base64/=') end it 'URL-encodes all other non-URL-safe characters' do push = Prometheus::Client::Push.new(job: '<bar job>', grouping_key: { foo_label: '<bar value>' }) expected = '/metrics/job/%3Cbar%20job%3E/foo_label/%3Cbar%20value%3E' expect(push.path).to eql(expected) end end describe '#request' do let(:content_type) { Prometheus::Client::Formats::Text::CONTENT_TYPE } let(:data) { Prometheus::Client::Formats::Text.marshal(registry) } let(:uri) { URI.parse("#{gateway}/metrics/job/test-job") } let(:response) do double( :response, code: '200', message: 'OK', body: 'Everything worked' ) end it 'sends marshalled registry to the specified gateway' do request = double(:request) expect(request).to receive(:content_type=).with(content_type) expect(request).to receive(:body=).with(data) expect(Net::HTTP::Post).to receive(:new).with(uri).and_return(request) http = double(:http) expect(http).to receive(:use_ssl=).with(false) expect(http).to receive(:open_timeout=).with(5) expect(http).to receive(:read_timeout=).with(30) expect(http).to receive(:request).with(request).and_return(response) expect(Net::HTTP).to receive(:new).with('localhost', 9091).and_return(http) push.send(:request, Net::HTTP::Post, registry) end context 'for a 3xx response' do let(:response) do double( :response, code: '301', message: 'Moved Permanently', body: 'Probably no body, but technically you can return one' ) end it 'raises a redirect error' do request = double(:request) allow(request).to receive(:content_type=) allow(request).to receive(:body=) allow(Net::HTTP::Post).to receive(:new).with(uri).and_return(request) http = double(:http) allow(http).to receive(:use_ssl=) allow(http).to receive(:open_timeout=) allow(http).to receive(:read_timeout=) allow(http).to receive(:request).with(request).and_return(response) allow(Net::HTTP).to receive(:new).with('localhost', 9091).and_return(http) expect { push.send(:request, Net::HTTP::Post, registry) }.to raise_error( Prometheus::Client::Push::HttpRedirectError ) end end context 'for a 4xx response' do let(:response) do double( :response, code: '400', message: 'Bad Request', body: 'Info on why the request was bad' ) end it 'raises a client error' do request = double(:request) allow(request).to receive(:content_type=) allow(request).to receive(:body=) allow(Net::HTTP::Post).to receive(:new).with(uri).and_return(request) http = double(:http) allow(http).to receive(:use_ssl=) allow(http).to receive(:open_timeout=) allow(http).to receive(:read_timeout=) allow(http).to receive(:request).with(request).and_return(response) allow(Net::HTTP).to receive(:new).with('localhost', 9091).and_return(http) expect { push.send(:request, Net::HTTP::Post, registry) }.to raise_error( Prometheus::Client::Push::HttpClientError ) end end context 'for a 5xx response' do let(:response) do double( :response, code: '500', message: 'Internal Server Error', body: 'Apology for the server code being broken' ) end it 'raises a server error' do request = double(:request) allow(request).to receive(:content_type=) allow(request).to receive(:body=) allow(Net::HTTP::Post).to receive(:new).with(uri).and_return(request) http = double(:http) allow(http).to receive(:use_ssl=) allow(http).to receive(:open_timeout=) allow(http).to receive(:read_timeout=) allow(http).to receive(:request).with(request).and_return(response) allow(Net::HTTP).to receive(:new).with('localhost', 9091).and_return(http) expect { push.send(:request,
Net::HTTP::Post, registry) }.to raise_error( Prometheus::Client::Push::HttpServerError ) end end it 'deletes data from the registry' do request = double(:request) expect(request).to receive(:content_type=).with(content_type) expect(Net::HTTP::Delete).to receive(:new).with(uri).and_return(request) http = double(:http) expect(http).to receive(:use_ssl=).with(false) expect(http).to receive(:open_timeout=).with(5) expect(http).to receive(:read_timeout=).with(30) expect(http).to receive(:request).with(request).and_return(response) expect(Net::HTTP).to receive(:new).with('localhost', 9091).and_return(http) push.send(:request, Net::HTTP::Delete) end context 'HTTPS support' do let(:gateway) { 'https://localhost:9091' } it 'uses HTTPS when requested' do request = double(:request) expect(request).to receive(:content_type=).with(content_type) expect(request).to receive(:body=).with(data) expect(Net::HTTP::Post).to receive(:new).with(uri).and_return(request) http = double(:http) expect(http).to receive(:use_ssl=).with(true) expect(http).to receive(:open_timeout=).with(5) expect(http).to receive(:read_timeout=).with(30) expect(http).to receive(:request).with(request).and_return(response) expect(Net::HTTP).to receive(:new).with('localhost', 9091).and_return(http) push.send(:request, Net::HTTP::Post, registry) end end context 'Basic Auth support' do context 'when credentials are passed in the gateway URL' do let(:gateway) { 'https://super:secret@localhost:9091' } it "raises an ArgumentError explaining why we don't support that mechanism" do expect { push }.to raise_error ArgumentError, /in the gateway URL.*username `super`/m end end context 'when credentials are passed to the separate `basic_auth` method' do let(:gateway) { 'https://localhost:9091' } it 'passes the credentials on to the HTTP client' do request = double(:request) expect(request).to receive(:content_type=).with(content_type) expect(request).to receive(:basic_auth).with('super', 'secret') expect(request).to receive(:body=).with(data) expect(Net::HTTP::Put).to receive(:new).with(uri).and_return(request) http = double(:http) expect(http).to receive(:use_ssl=).with(true) expect(http).to receive(:open_timeout=).with(5) expect(http).to receive(:read_timeout=).with(30) expect(http).to receive(:request).with(request).and_return(response) expect(Net::HTTP).to receive(:new).with('localhost', 9091).and_return(http) push.basic_auth("super", "secret") push.send(:request, Net::HTTP::Put, registry) end end end context 'with a grouping key that clashes with a metric label' do let(:grouping_key) { { foo: "bar"} } before do gauge = Prometheus::Client::Gauge.new( :test_gauge, 'test docstring', foo: nil ) registry.register(gauge) gauge.set({ foo: "bar"}, 42) end it 'raises an error when grouping key labels conflict with metric labels' do expect { push.send(:request, Net::HTTP::Post, registry) }.to raise_error( Prometheus::Client::LabelSetValidator::InvalidLabelSetError ) end end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/rack/000077500000000000000000000000001442423076500237145ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/prometheus/client/rack/collector_spec.rb000066400000000000000000000043361442423076500272470ustar00rootroot00000000000000# encoding: UTF-8 require 'rack/test' require 'prometheus/client/rack/collector' describe Prometheus::Client::Rack::Collector do include Rack::Test::Methods before do allow(Prometheus::Client.configuration).to receive(:value_class).and_return(Prometheus::Client::SimpleValue) end 
let(:registry) do Prometheus::Client::Registry.new end let(:original_app) do ->(_) { [200, { 'Content-Type' => 'text/html' }, ['OK']] } end let!(:app) do described_class.new(original_app, registry: registry) end it 'returns the app response' do get '/foo' expect(last_response).to be_ok expect(last_response.body).to eql('OK') end it 'propagates errors in the registry' do counter = registry.get(:http_requests_total) expect(counter).to receive(:increment).and_raise(NoMethodError) expect { get '/foo' }.to raise_error(NoMethodError) end it 'traces request information' do # expect(Time).to receive(:now).and_return(Time.at(0.0), Time.at(0.2)) labels = { method: 'get', host: 'example.org', path: '/foo', code: '200' } get '/foo' { http_requests_total: 1.0, # http_request_duration_seconds: { 0.5 => 0.2, 0.9 => 0.2, 0.99 => 0.2 }, # TODO: Fix summaries }.each do |metric, result| expect(registry.get(metric).get(labels)).to eql(result) end end context 'when the app raises an exception' do let(:original_app) do lambda do |env| raise NoMethodError if env['PATH_INFO'] == '/broken' [200, { 'Content-Type' => 'text/html' }, ['OK']] end end before do get '/foo' end it 'traces exceptions' do labels = { exception: 'NoMethodError' } expect { get '/broken' }.to raise_error NoMethodError expect(registry.get(:http_exceptions_total).get(labels)).to eql(1.0) end end context 'setting up with a block' do let(:app) do described_class.new(original_app, registry: registry) do |env| { method: env['REQUEST_METHOD'].downcase } # and ignore the path end end it 'allows labels configuration' do get '/foo/bar' labels = { method: 'get', code: '200' } expect(registry.get(:http_requests_total).get(labels)).to eql(1.0) end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/rack/exporter_spec.rb000066400000000000000000000055731442423076500271330ustar00rootroot00000000000000# encoding: UTF-8 require 'rack/test' require 'prometheus/client/rack/exporter' xdescribe Prometheus::Client::Rack::Exporter do include Rack::Test::Methods let(:registry) do Prometheus::Client::Registry.new end let(:app) do app = ->(_) { [200, { 'Content-Type' => 'text/html' }, ['OK']] } Prometheus::Client::Rack::Exporter.new(app, registry: registry) end context 'when requesting app endpoints' do it 'returns the app response' do get '/foo' expect(last_response).to be_ok expect(last_response.body).to eql('OK') end end context 'when requesting /metrics' do text = Prometheus::Client::Formats::Text shared_examples 'ok' do |headers, fmt| it "responds with 200 OK and Content-Type #{fmt::CONTENT_TYPE}" do registry.counter(:foo, 'foo counter').increment({}, 9) get '/metrics', nil, headers expect(last_response.status).to eql(200) expect(last_response.header['Content-Type']).to eql(fmt::CONTENT_TYPE) expect(last_response.body).to eql(fmt.marshal(registry)) end end shared_examples 'not acceptable' do |headers| it 'responds with 406 Not Acceptable' do message = 'Supported media types: text/plain' get '/metrics', nil, headers expect(last_response.status).to eql(406) expect(last_response.header['Content-Type']).to eql('text/plain') expect(last_response.body).to eql(message) end end context 'when client does not send an Accept header' do include_examples 'ok', {}, text end context 'when client accepts any media type' do include_examples 'ok', { 'HTTP_ACCEPT' => '*/*' }, text end context 'when client requests application/json' do include_examples 'not acceptable', 'HTTP_ACCEPT' => 'application/json' end context 'when client requests text/plain' do include_examples 'ok',
{ 'HTTP_ACCEPT' => 'text/plain' }, text end context 'when client uses different white spaces in Accept header' do accept = 'text/plain;q=1.0 ; version=0.0.4' include_examples 'ok', { 'HTTP_ACCEPT' => accept }, text end context 'when client does not include quality attribute' do accept = 'application/json;q=0.5, text/plain' include_examples 'ok', { 'HTTP_ACCEPT' => accept }, text end context 'when client accepts some unknown formats' do accept = 'text/plain;q=0.3, proto/buf;q=0.7' include_examples 'ok', { 'HTTP_ACCEPT' => accept }, text end context 'when client accepts only unknown formats' do accept = 'fancy/woo;q=0.3, proto/buf;q=0.7' include_examples 'not acceptable', 'HTTP_ACCEPT' => accept end context 'when client accepts unknown formats and wildcard' do accept = 'fancy/woo;q=0.3, proto/buf;q=0.7, */*;q=0.1' include_examples 'ok', { 'HTTP_ACCEPT' => accept }, text end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/registry_spec.rb000066400000000000000000000054501442423076500262070ustar00rootroot00000000000000# encoding: UTF-8 require 'thread' require 'prometheus/client/registry' describe Prometheus::Client::Registry do let(:registry) { Prometheus::Client::Registry.new } describe '.new' do it 'returns a new registry instance' do expect(registry).to be_a(Prometheus::Client::Registry) end end describe '#register' do it 'registers a new metric container and returns it' do metric = double(name: :test) expect(registry.register(metric)).to eql(metric) end it 'raises an exception if a metric name gets registered twice' do metric = double(name: :test) registry.register(metric) expect do registry.register(metric) end.to raise_exception described_class::AlreadyRegisteredError end it 'is thread safe' do mutex = Mutex.new containers = [] def registry.exist?(*args) super.tap { sleep(0.01) } end Array.new(5) do Thread.new do result = begin registry.register(double(name: :test)) rescue Prometheus::Client::Registry::AlreadyRegisteredError nil end mutex.synchronize { containers << result } end end.each(&:join) expect(containers.compact.size).to eql(1) end end describe '#counter' do it 'registers a new counter metric container and returns the counter' do metric = registry.counter(:test, 'test docstring') expect(metric).to be_a(Prometheus::Client::Counter) end end describe '#gauge' do it 'registers a new gauge metric container and returns the gauge' do metric = registry.gauge(:test, 'test docstring') expect(metric).to be_a(Prometheus::Client::Gauge) end end describe '#summary' do it 'registers a new summary metric container and returns the summary' do metric = registry.summary(:test, 'test docstring') expect(metric).to be_a(Prometheus::Client::Summary) end end describe '#histogram' do it 'registers a new histogram metric container and returns the histogram' do metric = registry.histogram(:test, 'test docstring') expect(metric).to be_a(Prometheus::Client::Histogram) end end describe '#exist?' 
do it 'returns true if a metric name has been registered' do registry.register(double(name: :test)) expect(registry.exist?(:test)).to eql(true) end it 'returns false if a metric name has not been registered yet' do expect(registry.exist?(:test)).to eql(false) end end describe '#get' do it 'returns a previously registered metric container' do registry.register(double(name: :test)) expect(registry.get(:test)).to be end it 'returns nil if the metric has not been registered yet' do expect(registry.get(:test)).to eql(nil) end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/summary_spec.rb000066400000000000000000000026651442423076500260370ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client/summary' require 'examples/metric_example' describe Prometheus::Client::Summary do let(:summary) { Prometheus::Client::Summary.new(:bar, 'bar description') } it_behaves_like Prometheus::Client::Metric do let(:type) { Float } end describe '#observe' do it 'records the given value' do expect do summary.observe({}, 5) end.to change { summary.get } end end xdescribe '#get' do before do summary.observe({ foo: 'bar' }, 3) summary.observe({ foo: 'bar' }, 5.2) summary.observe({ foo: 'bar' }, 13) summary.observe({ foo: 'bar' }, 4) end it 'returns a set of quantile values' do expect(summary.get(foo: 'bar')).to eql(0.5 => 4, 0.9 => 5.2, 0.99 => 5.2) end it 'returns a value which responds to #sum and #total' do value = summary.get(foo: 'bar') expect(value.sum).to eql(25.2) expect(value.total).to eql(4) end it 'uses nil as default value' do expect(summary.get({})).to eql(0.5 => nil, 0.9 => nil, 0.99 => nil) end end xdescribe '#values' do it 'returns a hash of all recorded summaries' do summary.observe({ status: 'bar' }, 3) summary.observe({ status: 'foo' }, 5) expect(summary.values).to eql( { status: 'bar' } => { 0.5 => 3, 0.9 => 3, 0.99 => 3 }, { status: 'foo' } => { 0.5 => 5, 0.9 => 5, 0.99 => 5 }, ) end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client/support/000077500000000000000000000000001442423076500245105ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/spec/prometheus/client/support/unicorn_spec.rb000066400000000000000000000055201442423076500275260ustar00rootroot00000000000000require 'spec_helper' require 'prometheus/client/support/unicorn' class FakeUnicornWorker attr_reader :nr def initialize(nr) @nr = nr end end describe Prometheus::Client::Support::Unicorn do describe '.worker_id' do let(:worker_id) { '09' } around do |example| old_name = $0 example.run $0 = old_name end context 'process name contains worker id' do before do $0 = "program worker[#{worker_id}] arguments" end it 'returns worker_id' do expect(subject.worker_id).to eq(worker_id) end end context 'process name is without worker id' do it 'calls the .object_based_worker_id provider' do expect(subject).to receive(:object_based_worker_id).and_return(worker_id) expect(subject.worker_id).to eq(worker_id) end end end describe '.object_based_worker_id' do context 'when Unicorn is defined' do before do stub_const('Unicorn::Worker', FakeUnicornWorker) end context 'Worker instance is present in ObjectSpace' do let(:worker_number) { 10 } let!(:unicorn_worker) { FakeUnicornWorker.new(worker_number) } it 'Unicorn::Worker is defined' do expect(defined?(Unicorn::Worker)).to be_truthy end it 'returns worker id' do expect(described_class.object_based_worker_id).to eq(worker_number) end end context 'Worker instance is not present in ObjectSpace' do it 'Unicorn::Worker is defined' do expect(defined?(Unicorn::Worker)).to be_truthy end it 'returns no worker id' do expect(ObjectSpace).to receive(:each_object).with(::Unicorn::Worker).and_return(nil) expect(described_class.object_based_worker_id).to eq(nil) end end end context 'Unicorn::Worker is not defined' do it 'Unicorn::Worker is not defined' do expect(defined?(Unicorn::Worker)).to be_falsey end it 'returns no worker_id' do expect(described_class.object_based_worker_id).to eq(nil) end end end describe '.worker_pid_provider' do context 'worker_id is provided' do let(:worker_id) { 2 } before do allow(described_class).to receive(:worker_id).and_return(worker_id) end it 'returns worker pid created from worker id' do expect(described_class.worker_pid_provider).to eq("worker_id_#{worker_id}") end end context 'worker_id is not provided' do let(:process_id) { 10 } before do allow(described_class).to receive(:worker_id).and_return(nil) allow(Process).to receive(:pid).and_return(process_id) end it 'returns worker pid created from Process ID' do expect(described_class.worker_pid_provider).to eq("process_id_#{process_id}") end end end end prometheus-client-mmap-v0.23.1/spec/prometheus/client_spec.rb000066400000000000000000000043501442423076500243350ustar00rootroot00000000000000# encoding: UTF-8 require 'prometheus/client' describe Prometheus::Client do describe '.registry' do it 'returns a registry object' do expect(described_class.registry).to be_a(described_class::Registry) end it 'memoizes the returned object' do expect(described_class.registry).to eql(described_class.registry) end end context '.reset! and .reinitialize_on_pid_change' do let(:metric_name) { :room_temperature_celsius } let(:label) { { room: 'kitchen' } } let(:value) { 21 } let(:gauge) { Prometheus::Client::Gauge.new(metric_name, 'test') } before do described_class.cleanup! described_class.reset! # registering metrics will leak into other specs registry = described_class.registry gauge.set(label, value) registry.register(gauge) expect(registry.metrics.count).to eq(1) expect(registry.get(metric_name).get(label)).to eq(value) end describe '.reset!' do it 'resets registry and clears existing metrics' do described_class.cleanup! described_class.reset!
registry = described_class.registry expect(registry.metrics.count).to eq(0) registry.register(gauge) expect(registry.get(metric_name).get(label)).not_to eq(value) end end describe '.reinitialize_on_pid_change' do context 'with force: false' do it 'calls `MmapedValue.reinitialize_on_pid_change`' do expect(Prometheus::Client::MmapedValue).to receive(:reinitialize_on_pid_change).and_call_original described_class.reinitialize_on_pid_change(force: false) end end context 'without explicit :force param' do it 'defaults to `false` and calls `MmapedValue.reinitialize_on_pid_change`' do expect(Prometheus::Client::MmapedValue).to receive(:reinitialize_on_pid_change).and_call_original described_class.reinitialize_on_pid_change end end context 'with force: true' do it 'calls `MmapedValue.reset_and_reinitialize`' do expect(Prometheus::Client::MmapedValue).to receive(:reset_and_reinitialize).and_call_original described_class.reinitialize_on_pid_change(force: true) end end end end end prometheus-client-mmap-v0.23.1/spec/sample_metrics.rb000066400000000000000000000021261442423076500226600ustar00rootroot00000000000000module SampleMetrics def add_simple_metrics(registry) counter = registry.counter(:counter, 'counter', { b: 1 }) counter.increment(a: 1) counter.increment(a: 2) counter.increment(a: 1, b: 2) gauge = registry.gauge(:gauge, 'gauge', {}, :livesum) gauge.set({ b: 1 }, 1) gauge.set({ b: 2 }, 1) gauge_with_pid = registry.gauge(:gauge_with_pid, 'gauge_with_pid', b: 1) gauge_with_pid.set({ c: 1 }, 1) gauge_with_null_labels = registry.gauge(:gauge_with_null_labels, 'gauge_with_null_labels', { a: nil, b: nil }, :livesum) gauge_with_null_labels.set({ a: nil, b: nil }, 1) gauge_with_big_value = registry.gauge(:gauge_with_big_value, 'gauge_with_big_value', { a: 0 }, :livesum) gauge_with_big_value.set({ a: 12345678901234567 }, 12345678901234567) gauge_with_big_value.set({ a: 0.12345678901234567 }, 0.12345678901234567) registry.gauge(:gauge_without_measurements, 'gauge_without_measurements', b: 1) registry.histogram(:histogram, 'histogram', {}).observe({ a: 1 }, 1) registry.summary(:summary, 'summary', a: 1).observe({ b: 1 }, 1) end end prometheus-client-mmap-v0.23.1/spec/spec_helper.rb000066400000000000000000000006111442423076500221370ustar00rootroot00000000000000require 'simplecov' require 'sample_metrics' require 'temp_metrics_dir' SimpleCov.start do add_filter 'fuzz' add_filter 'tmp' add_filter 'vendor/ruby' end RSpec.configure do |config| config.include SampleMetrics, :sample_metrics config.include TempMetricsDir, :temp_metrics_dir config.after(:all) do cleanup_temp_metrics_dir if defined?(cleanup_temp_metrics_dir) end end prometheus-client-mmap-v0.23.1/spec/temp_metrics_dir.rb000066400000000000000000000005501442423076500232010ustar00rootroot00000000000000module TempMetricsDir def temp_metrics_dir @temp_metrics_dir ||= Dir.mktmpdir('temp_metrics_dir') end def cleanup_temp_metrics_dir return if @temp_metrics_dir.nil? 
begin FileUtils.rm_rf(@temp_metrics_dir) rescue StandardError => ex puts "Files cleanup caused #{ex}" ensure @temp_metrics_dir = nil end end end prometheus-client-mmap-v0.23.1/tmp/000077500000000000000000000000001442423076500171715ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/tmp/.gitkeep000066400000000000000000000000001442423076500206100ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/tools/000077500000000000000000000000001442423076500175315ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/tools/deploy-rubygem.sh000077500000000000000000000010641442423076500230350ustar00rootroot00000000000000#!/usr/bin/env bash set -euo pipefail IFS=$'\n\t' mkdir -p ~/.gem/ temp_file=$(mktemp) # Revert changes to the credentials file function revert_gem_cred_switch { mv "$temp_file" ~/.gem/credentials } if [[ -f ~/.gem/credentials ]]; then echo "Temporarily moving existing credentials to $temp_file" mv ~/.gem/credentials "$temp_file" trap revert_gem_cred_switch EXIT fi cat <<EOD > ~/.gem/credentials --- :rubygems_api_key: $RUBYGEMS_API_KEY EOD chmod 600 ~/.gem/credentials set -x bundle install bundle exec rake build ls pkg/*.gem | xargs -n 1 gem push prometheus-client-mmap-v0.23.1/vendor/000077500000000000000000000000001442423076500176665ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/000077500000000000000000000000001442423076500201105ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/hashmap/000077500000000000000000000000001442423076500215315ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/hashmap/.gitignore000066400000000000000000000006561442423076500235300ustar00rootroot00000000000000# Prerequisites *.d # Object files *.o *.ko *.obj *.elf # Linker output *.ilk *.map *.exp # Precompiled Headers *.gch *.pch # Libraries *.lib *.a *.la *.lo # Shared objects (inc. Windows DLLs) *.dll *.so *.so.* *.dylib # Executables *.exe *.out *.app *.i*86 *.x86_64 *.hex # Debug files *.dSYM/ *.su *.idb *.pdb # Kernel Module Compile Results *.mod* *.cmd .tmp_versions/ modules.order Module.symvers Mkfile.old dkms.conf prometheus-client-mmap-v0.23.1/vendor/c/hashmap/LICENSE000066400000000000000000000020541442423076500225370ustar00rootroot00000000000000MIT License Copyright (c) 2016 David Leeds Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. prometheus-client-mmap-v0.23.1/vendor/c/hashmap/README.md000066400000000000000000000106261442423076500230150ustar00rootroot00000000000000# hashmap Flexible hashmap implementation in C using open addressing and linear probing for collision resolution.
### Summary This project came into existence because there is a notable lack of flexible and easy-to-use data structures available in C. Sure, higher level languages have built-in libraries, but plenty of embedded projects or higher level libraries start with core C code. It was undesirable to add a bulky library like Glib as a dependency to my projects, or grapple with a restrictive license agreement. Searching for "C hashmap" yielded results with questionable algorithms and code quality, projects with difficult or inflexible interfaces, or projects with less desirable licenses. I decided it was time to create my own. ### Goals * **To scale gracefully to the full capacity of the numeric primitives in use.** E.g. on a 32-bit machine, you should be able to load a billion+ entries without hitting any bugs relating to integer overflows. Lookups on a hashtable with a billion entries should be performed in close to constant time, no different than lookups in a hashtable with 20 entries. Automatic rehashing occurs and maintains a load factor of 0.75 or less. * **To provide a clean and easy-to-use interface.** C data structures often struggle to strike a balance between flexibility and ease of use. To this end, I provided a generic interface using void pointers for keys and data, and macros to generate type-specific wrapper functions, if desired. * **To enable easy iteration and safe entry removal during iteration.** Applications often need these features, and the data structure should not hold them back. Both an iterator interface and a foreach function were provided to satisfy various use-cases. This hashmap also uses an open addressing scheme, which has superior iteration performance to a similar hashmap implemented using separate chaining (buckets with linked lists). This is because fewer instructions are needed per iteration, and array traversal has superior cache performance to linked list traversal. * **To use a very unrestrictive software license.** Using no license was an option, but I wanted to allow the code to be tracked, simply for my own edification. I chose the MIT license because it is the most common open source license in use, and it grants full rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell the code. Basically, take this code and do what you want with it. Just be nice and leave the license comment and my name at the top of the file. Feel free to add your name if you are modifying and redistributing. ### Code Example ```C #include <stdlib.h> #include <stdio.h> #include "hashmap.h" /* Some sample data structure with a string key */ struct blob { char key[32]; size_t data_len; unsigned char data[1024]; }; /* Declare type-specific blob_hashmap_* functions with this handy macro */ HASHMAP_FUNCS_CREATE(blob, const char, struct blob) struct blob *blob_load(void) { struct blob *b; /* * Hypothetical function that allocates and loads blob structures * from somewhere. Returns NULL when there are no more blobs to load.
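 * (Any source of heap-allocated records will do here -- the example only
 * needs something to insert, and duplicate keys are handled by the caller
 * below.)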
*/ return b; } /* Hashmap structure */ struct hashmap map; int main(int argc, char **argv) { struct blob *b; struct hashmap_iter *iter; /* Initialize with default string key functions and init size */ hashmap_init(&map, hashmap_hash_string, hashmap_compare_string, 0); /* Load some sample data into the map and discard duplicates */ while ((b = blob_load()) != NULL) { if (blob_hashmap_put(&map, b->key, b) != b) { printf("discarding blob with duplicate key: %s\n", b->key); free(b); } } /* Lookup a blob with key "AbCdEf" */ b = blob_hashmap_get(&map, "AbCdEf"); if (b) { printf("Found blob[%s]\n", b->key); } /* Iterate through all blobs and print each one */ for (iter = hashmap_iter(&map); iter; iter = hashmap_iter_next(&map, iter)) { printf("blob[%s]: data_len %zu bytes\n", blob_hashmap_iter_get_key(iter), blob_hashmap_iter_get_data(iter)->data_len); } /* Remove all blobs with no data */ iter = hashmap_iter(&map); while (iter) { b = blob_hashmap_iter_get_data(iter); if (b->data_len == 0) { iter = hashmap_iter_remove(&map, iter); free(b); } else { iter = hashmap_iter_next(&map, iter); } } /* Free all allocated resources associated with map and reset its state */ hashmap_destroy(&map); return 0; } ``` prometheus-client-mmap-v0.23.1/vendor/c/hashmap/_config.yml000066400000000000000000000000311442423076500236520ustar00rootroot00000000000000theme: jekyll-theme-slateprometheus-client-mmap-v0.23.1/vendor/c/hashmap/src/000077500000000000000000000000001442423076500223205ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/hashmap/src/hashmap.c000066400000000000000000000405361442423076500241150ustar00rootroot00000000000000/* * Copyright (c) 2016-2017 David Leeds * * Hashmap is free software; you can redistribute it and/or modify * it under the terms of the MIT license. See LICENSE for details. */ #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <errno.h> #include "hashmap.h" #ifndef HASHMAP_NOASSERT #include <assert.h> #define HASHMAP_ASSERT(expr) assert(expr) #else #define HASHMAP_ASSERT(expr) #endif /* Table sizes must be powers of 2 */ #define HASHMAP_SIZE_MIN (1 << 5) /* 32 */ #define HASHMAP_SIZE_DEFAULT (1 << 8) /* 256 */ #define HASHMAP_SIZE_MOD(map, val) ((val) & ((map)->table_size - 1)) /* Limit for probing is 1/2 of table_size */ #define HASHMAP_PROBE_LEN(map) ((map)->table_size >> 1) /* Return the next linear probe index */ #define HASHMAP_PROBE_NEXT(map, index) HASHMAP_SIZE_MOD(map, (index) + 1) /* Check if index b is less than or equal to index a */ #define HASHMAP_INDEX_LE(map, a, b) \ ((a) == (b) || (((b) - (a)) & ((map)->table_size >> 1)) != 0) struct hashmap_entry { void *key; void *data; #ifdef HASHMAP_METRICS size_t num_collisions; #endif }; /* * Enforce a maximum 0.75 load factor. */ static inline size_t hashmap_table_min_size_calc(size_t num_entries) { return num_entries + (num_entries / 3); } /* * Calculate the optimal table size, given the specified max number * of elements. */ static size_t hashmap_table_size_calc(size_t num_entries) { size_t table_size; size_t min_size; table_size = hashmap_table_min_size_calc(num_entries); /* Table size is always a power of 2 */ min_size = HASHMAP_SIZE_MIN; while (min_size < table_size) { min_size <<= 1; } return min_size; } /* * Get a valid hash table index from a key. */ static inline size_t hashmap_calc_index(const struct hashmap *map, const void *key) { return HASHMAP_SIZE_MOD(map, map->hash(key)); } /* * Return the next populated entry, starting with the specified one. * Returns NULL if there are no more valid entries.
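 *
 * This helper is the building block of the public iterator: hashmap_iter()
 * and hashmap_iter_next() simply scan forward from a slot to the next slot
 * whose key pointer is non-NULL.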
*/ static struct hashmap_entry *hashmap_entry_get_populated( const struct hashmap *map, struct hashmap_entry *entry) { for (; entry < &map->table[map->table_size]; ++entry) { if (entry->key) { return entry; } } return NULL; } /* * Find the hashmap entry with the specified key, or an empty slot. * Returns NULL if the entire table has been searched without finding a match. */ static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map, const void *key, bool find_empty) { size_t i; size_t index; size_t probe_len = HASHMAP_PROBE_LEN(map); struct hashmap_entry *entry; index = hashmap_calc_index(map, key); /* Linear probing */ for (i = 0; i < probe_len; ++i) { entry = &map->table[index]; if (!entry->key) { if (find_empty) { #ifdef HASHMAP_METRICS entry->num_collisions = i; #endif return entry; } return NULL; } if (map->key_compare(key, entry->key) == 0) { return entry; } index = HASHMAP_PROBE_NEXT(map, index); } return NULL; } /* * Removes the specified entry and processes the following entries to reduce * the load factor and keep the chain continuous. This is a required * step for hash maps using linear probing. */ static void hashmap_entry_remove(struct hashmap *map, struct hashmap_entry *removed_entry) { size_t i; #ifdef HASHMAP_METRICS size_t removed_i = 0; #endif size_t index; size_t entry_index; size_t removed_index = (removed_entry - map->table); struct hashmap_entry *entry; /* Free the key */ if (map->key_free) { map->key_free(removed_entry->key); } --map->num_entries; /* Fill the free slot in the chain */ index = HASHMAP_PROBE_NEXT(map, removed_index); for (i = 1; i < map->table_size; ++i) { entry = &map->table[index]; if (!entry->key) { /* Reached end of chain */ break; } entry_index = hashmap_calc_index(map, entry->key); /* Shift in entries with an index <= the removed slot */ if (HASHMAP_INDEX_LE(map, removed_index, entry_index)) { #ifdef HASHMAP_METRICS entry->num_collisions -= (i - removed_i); removed_i = i; #endif memcpy(removed_entry, entry, sizeof(*removed_entry)); removed_index = index; removed_entry = entry; } index = HASHMAP_PROBE_NEXT(map, index); } /* Clear the last removed entry */ memset(removed_entry, 0, sizeof(*removed_entry)); } /* * Reallocates the hash table to the new size and rehashes all entries. * new_size MUST be a power of 2. * Returns 0 on success and -errno on allocation or hash function failure. */ static int hashmap_rehash(struct hashmap *map, size_t new_size) { size_t old_size; struct hashmap_entry *old_table; struct hashmap_entry *new_table; struct hashmap_entry *entry; struct hashmap_entry *new_entry; HASHMAP_ASSERT(new_size >= HASHMAP_SIZE_MIN); HASHMAP_ASSERT((new_size & (new_size - 1)) == 0); new_table = (struct hashmap_entry *)calloc(new_size, sizeof(struct hashmap_entry)); if (!new_table) { return -ENOMEM; } /* Backup old elements in case of rehash failure */ old_size = map->table_size; old_table = map->table; map->table_size = new_size; map->table = new_table; /* Rehash */ for (entry = old_table; entry < &old_table[old_size]; ++entry) { if (!entry->data) { /* Only copy entries with data */ continue; } new_entry = hashmap_entry_find(map, entry->key, true); if (!new_entry) { /* * The load factor is too high with the new table * size, or a poor hash function was used.
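 * Failing here is non-fatal for the map as a whole: the old
 * table has not been modified, so it is restored below and
 * the map keeps working at its previous size.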
*/ goto revert; } /* Shallow copy (intentionally omits num_collisions) */ new_entry->key = entry->key; new_entry->data = entry->data; } free(old_table); return 0; revert: map->table_size = old_size; map->table = old_table; free(new_table); return -EINVAL; } /* * Iterate through all entries and free all keys. */ static void hashmap_free_keys(struct hashmap *map) { struct hashmap_iter *iter; if (!map->key_free) { return; } for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) { map->key_free((void *)hashmap_iter_get_key(iter)); } } /* * Initialize an empty hashmap. A hash function and a key comparator are * required. * * hash_func should return an even distribution of numbers between 0 * and SIZE_MAX varying on the key provided. * * key_compare_func should return 0 if the keys match, and non-zero otherwise. * * initial_size is optional, and may be set to the max number of entries * expected to be put in the hash table. This is used as a hint to * pre-allocate the hash table to the minimum size needed to avoid * gratuitous rehashes. If initial_size is 0, a default size will be used. * * Returns 0 on success and -errno on failure. */ int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *), int (*key_compare_func)(const void *, const void *), size_t initial_size) { HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(hash_func != NULL); HASHMAP_ASSERT(key_compare_func != NULL); if (!initial_size) { initial_size = HASHMAP_SIZE_DEFAULT; } else { /* Convert init size to valid table size */ initial_size = hashmap_table_size_calc(initial_size); } map->table_size_init = initial_size; map->table_size = initial_size; map->num_entries = 0; map->table = (struct hashmap_entry *)calloc(initial_size, sizeof(struct hashmap_entry)); if (!map->table) { return -ENOMEM; } map->hash = hash_func; map->key_compare = key_compare_func; map->key_alloc = NULL; map->key_free = NULL; return 0; } /* * Free the hashmap and all associated memory. */ void hashmap_destroy(struct hashmap *map) { if (!map) { return; } hashmap_free_keys(map); free(map->table); memset(map, 0, sizeof(*map)); } /* * Enable internal memory management of hash keys. */ void hashmap_set_key_alloc_funcs(struct hashmap *map, void *(*key_alloc_func)(const void *), void (*key_free_func)(void *)) { HASHMAP_ASSERT(map != NULL); map->key_alloc = key_alloc_func; map->key_free = key_free_func; } /* * Add an entry to the hashmap. If an entry with a matching key already * exists and has a data pointer associated with it, the existing data * pointer is returned, instead of assigning the new value. Compare * the return value with the data passed in to determine if a new entry was * created. Returns NULL if memory allocation failed. */ void *hashmap_put(struct hashmap *map, const void *key, void *data) { struct hashmap_entry *entry; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(key != NULL); /* Rehash with 2x capacity if load factor is approaching 0.75 */ if (map->table_size <= hashmap_table_min_size_calc(map->num_entries)) { hashmap_rehash(map, map->table_size << 1); } entry = hashmap_entry_find(map, key, true); if (!entry) { /* * Cannot find an empty slot. Either out of memory, or using * a poor hash function. Attempt to rehash once to reduce * chain length.
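 * (Probing is capped at table_size / 2 by HASHMAP_PROBE_LEN(),
 * so a badly clustered table can appear full long before every
 * slot is occupied; doubling the table size breaks up such
 * clusters.)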
*/ if (hashmap_rehash(map, map->table_size << 1) < 0) { return NULL; } entry = hashmap_entry_find(map, key, true); if (!entry) { return NULL; } } if (!entry->key) { /* Allocate copy of key to simplify memory management */ if (map->key_alloc) { entry->key = map->key_alloc(key); if (!entry->key) { return NULL; } } else { entry->key = (void *)key; } ++map->num_entries; } else if (entry->data) { /* Do not overwrite existing data */ return entry->data; } entry->data = data; return data; } /* * Return the data pointer, or NULL if no entry exists. */ void *hashmap_get(const struct hashmap *map, const void *key) { struct hashmap_entry *entry; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(key != NULL); entry = hashmap_entry_find(map, key, false); if (!entry) { return NULL; } return entry->data; } /* * Remove an entry with the specified key from the map. * Returns the data pointer, or NULL, if no entry was found. */ void *hashmap_remove(struct hashmap *map, const void *key) { struct hashmap_entry *entry; void *data; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(key != NULL); entry = hashmap_entry_find(map, key, false); if (!entry) { return NULL; } data = entry->data; /* Clear the entry and make the chain contiguous */ hashmap_entry_remove(map, entry); return data; } /* * Remove all entries. */ void hashmap_clear(struct hashmap *map) { HASHMAP_ASSERT(map != NULL); hashmap_free_keys(map); map->num_entries = 0; memset(map->table, 0, sizeof(struct hashmap_entry) * map->table_size); } /* * Remove all entries and reset the hash table to its initial size. */ void hashmap_reset(struct hashmap *map) { struct hashmap_entry *new_table; HASHMAP_ASSERT(map != NULL); hashmap_clear(map); if (map->table_size == map->table_size_init) { return; } new_table = (struct hashmap_entry *)realloc(map->table, sizeof(struct hashmap_entry) * map->table_size_init); if (!new_table) { return; } map->table = new_table; map->table_size = map->table_size_init; } /* * Return the number of entries in the hash map. */ size_t hashmap_size(const struct hashmap *map) { HASHMAP_ASSERT(map != NULL); return map->num_entries; } /* * Get a new hashmap iterator. The iterator is an opaque * pointer that may be used with hashmap_iter_*() functions. * Hashmap iterators are INVALID after a put or remove operation is performed. * hashmap_iter_remove() allows safe removal during iteration. */ struct hashmap_iter *hashmap_iter(const struct hashmap *map) { HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return NULL; } return (struct hashmap_iter *)hashmap_entry_get_populated(map, map->table); } /* * Return an iterator to the next hashmap entry. Returns NULL if there are * no more entries. */ struct hashmap_iter *hashmap_iter_next(const struct hashmap *map, const struct hashmap_iter *iter) { struct hashmap_entry *entry = (struct hashmap_entry *)iter; HASHMAP_ASSERT(map != NULL); if (!iter) { return NULL; } return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry + 1); } /* * Remove the hashmap entry pointed to by this iterator and return an * iterator to the next entry. Returns NULL if there are no more entries. 
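 *
 * As an illustrative sketch (not from the original sources; should_keep()
 * is a hypothetical predicate), filtering a map in place looks like:
 *
 *   struct hashmap_iter *it = hashmap_iter(map);
 *   while (it) {
 *       if (should_keep(hashmap_iter_get_data(it))) {
 *           it = hashmap_iter_next(map, it);   /* keep entry, advance */
 *       } else {
 *           it = hashmap_iter_remove(map, it); /* remove and advance */
 *       }
 *   }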
*/ struct hashmap_iter *hashmap_iter_remove(struct hashmap *map, const struct hashmap_iter *iter) { struct hashmap_entry *entry = (struct hashmap_entry *)iter; HASHMAP_ASSERT(map != NULL); if (!iter) { return NULL; } if (!entry->key) { /* Iterator is invalid, so just return the next valid entry */ return hashmap_iter_next(map, iter); } hashmap_entry_remove(map, entry); return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry); } /* * Return the key of the entry pointed to by the iterator. */ const void *hashmap_iter_get_key(const struct hashmap_iter *iter) { if (!iter) { return NULL; } return (const void *)((struct hashmap_entry *)iter)->key; } /* * Return the data of the entry pointed to by the iterator. */ void *hashmap_iter_get_data(const struct hashmap_iter *iter) { if (!iter) { return NULL; } return ((struct hashmap_entry *)iter)->data; } /* * Set the data pointer of the entry pointed to by the iterator. */ void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data) { if (!iter) { return; } ((struct hashmap_entry *)iter)->data = data; } /* * Invoke func for each entry in the hashmap. Unlike the hashmap_iter_*() * interface, this function supports calls to hashmap_remove() during iteration. * However, it is an error to put or remove an entry other than the current one, * and doing so will immediately halt iteration and return an error. * Iteration is stopped if func returns non-zero. Returns func's return * value if it is < 0, otherwise, 0. */ int hashmap_foreach(const struct hashmap *map, int (*func)(const void *, void *, void *), void *arg) { struct hashmap_entry *entry; size_t num_entries; const void *key; int rc; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(func != NULL); entry = map->table; for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } num_entries = map->num_entries; key = entry->key; rc = func(entry->key, entry->data, arg); if (rc < 0) { return rc; } if (rc > 0) { return 0; } /* Run this entry again if func() deleted it */ if (entry->key != key) { --entry; } else if (num_entries != map->num_entries) { /* Stop immediately if func put/removed another entry */ return -1; } } return 0; } /* * Default hash function for string keys. * This is an implementation of the well-documented Jenkins one-at-a-time * hash function. */ size_t hashmap_hash_string(const void *key) { const char *key_str = (const char *)key; size_t hash = 0; for (; *key_str; ++key_str) { hash += *key_str; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); return hash; } /* * Default key comparator function for string keys. */ int hashmap_compare_string(const void *a, const void *b) { return strcmp((const char *)a, (const char *)b); } /* * Default key allocation function for string keys. Use free() for the * key_free_func. */ void *hashmap_alloc_key_string(const void *key) { return (void *)strdup((const char *)key); } #ifdef HASHMAP_METRICS /* * Return the load factor. */ double hashmap_load_factor(const struct hashmap *map) { HASHMAP_ASSERT(map != NULL); if (!map->table_size) { return 0; } return (double)map->num_entries / map->table_size; } /* * Return the average number of collisions per entry. 
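 *
 * Each entry records how many probe steps were needed when it was inserted
 * (num_collisions), so the mean is simply the total probe distance divided
 * by the number of entries; 0.0 means every key sits in its home slot.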
*/ double hashmap_collisions_mean(const struct hashmap *map) { struct hashmap_entry *entry; size_t total_collisions = 0; HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return 0; } for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } total_collisions += entry->num_collisions; } return (double)total_collisions / map->num_entries; } /* * Return the variance between entry collisions. The higher the variance, * the more likely the hash function is poor and is resulting in clustering. */ double hashmap_collisions_variance(const struct hashmap *map) { struct hashmap_entry *entry; double mean_collisions; double variance; double total_variance = 0; HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return 0; } mean_collisions = hashmap_collisions_mean(map); for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } variance = (double)entry->num_collisions - mean_collisions; total_variance += variance * variance; } return total_variance / map->num_entries; } #endif prometheus-client-mmap-v0.23.1/vendor/c/hashmap/src/hashmap.h000066400000000000000000000202061442423076500241120ustar00rootroot00000000000000/* * Copyright (c) 2016-2017 David Leeds * * Hashmap is free software; you can redistribute it and/or modify * it under the terms of the MIT license. See LICENSE for details. */ #ifndef __HASHMAP_H__ #define __HASHMAP_H__ #include <stddef.h> /* * Define HASHMAP_METRICS to compile in performance analysis * functions for use in assessing hash function performance. */ /* #define HASHMAP_METRICS */ /* * Define HASHMAP_NOASSERT to compile out all assertions used internally. */ /* #define HASHMAP_NOASSERT */ /* * Macros to declare type-specific versions of hashmap_*() functions to * allow compile-time type checking and avoid the need for type casting.
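 *
 * For example (an illustrative sketch; the `point` names are hypothetical):
 *
 *   HASHMAP_FUNCS_CREATE(point, const char, struct point)
 *
 * generates point_hashmap_put(), point_hashmap_get(), point_hashmap_remove()
 * and friends, so a lookup such as
 *
 *   struct point *p = point_hashmap_get(&map, "origin");
 *
 * type-checks without hand-written casts to and from void pointers.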
*/ #define HASHMAP_FUNCS_DECLARE(name, key_type, data_type) \ data_type *name##_hashmap_put(struct hashmap *map, key_type *key, \ data_type *data); \ data_type *name##_hashmap_get(const struct hashmap *map, \ key_type *key); \ data_type *name##_hashmap_remove(struct hashmap *map, \ key_type *key); \ key_type *name##_hashmap_iter_get_key( \ const struct hashmap_iter *iter); \ data_type *name##_hashmap_iter_get_data( \ const struct hashmap_iter *iter); \ void name##_hashmap_iter_set_data(const struct hashmap_iter *iter, \ data_type *data); \ int name##_hashmap_foreach(const struct hashmap *map, \ int (*func)(key_type *, data_type *, void *), void *arg); #define HASHMAP_FUNCS_CREATE(name, key_type, data_type) \ data_type *name##_hashmap_put(struct hashmap *map, key_type *key, \ data_type *data) \ { \ return (data_type *)hashmap_put(map, (const void *)key, \ (void *)data); \ } \ data_type *name##_hashmap_get(const struct hashmap *map, \ key_type *key) \ { \ return (data_type *)hashmap_get(map, (const void *)key); \ } \ data_type *name##_hashmap_remove(struct hashmap *map, \ key_type *key) \ { \ return (data_type *)hashmap_remove(map, (const void *)key); \ } \ key_type *name##_hashmap_iter_get_key( \ const struct hashmap_iter *iter) \ { \ return (key_type *)hashmap_iter_get_key(iter); \ } \ data_type *name##_hashmap_iter_get_data( \ const struct hashmap_iter *iter) \ { \ return (data_type *)hashmap_iter_get_data(iter); \ } \ void name##_hashmap_iter_set_data(const struct hashmap_iter *iter, \ data_type *data) \ { \ hashmap_iter_set_data(iter, (void *)data); \ } \ struct __##name##_hashmap_foreach_state { \ int (*func)(key_type *, data_type *, void *); \ void *arg; \ }; \ static inline int __##name##_hashmap_foreach_callback(const void *key, \ void *data, void *arg) \ { \ struct __##name##_hashmap_foreach_state *s = \ (struct __##name##_hashmap_foreach_state *)arg; \ return s->func((key_type *)key, (data_type *)data, s->arg); \ } \ int name##_hashmap_foreach(const struct hashmap *map, \ int (*func)(key_type *, data_type *, void *), void *arg) \ { \ struct __##name##_hashmap_foreach_state s = { func, arg }; \ return hashmap_foreach(map, \ __##name##_hashmap_foreach_callback, &s); \ } struct hashmap_iter; struct hashmap_entry; /* * The hashmap state structure. */ struct hashmap { size_t table_size_init; size_t table_size; size_t num_entries; struct hashmap_entry *table; size_t (*hash)(const void *); int (*key_compare)(const void *, const void *); void *(*key_alloc)(const void *); void (*key_free)(void *); }; /* * Initialize an empty hashmap. A hash function and a key comparator are * required. * * hash_func should return an even distribution of numbers between 0 * and SIZE_MAX varying on the key provided. * * key_compare_func should return 0 if the keys match, and non-zero otherwise. * * initial_size is optional, and may be set to the max number of entries * expected to be put in the hash table. This is used as a hint to * pre-allocate the hash table to the minimum size to avoid gratuitous rehashes. * If initial_size is 0, a default size will be used. * * Returns 0 on success and -errno on failure. */ int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *), int (*key_compare_func)(const void *, const void *), size_t initial_size); /* * Free the hashmap and all associated memory. */ void hashmap_destroy(struct hashmap *map); /* * Enable internal memory allocation and management of hash keys.
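 *
 * For string keys the stock helpers declared below can be used directly,
 * e.g.
 *
 *   hashmap_set_key_alloc_funcs(&map, hashmap_alloc_key_string, free);
 *
 * after which the map duplicates each key on put and frees the copy on
 * removal or destroy.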
*/ void hashmap_set_key_alloc_funcs(struct hashmap *map, void *(*key_alloc_func)(const void *), void (*key_free_func)(void *)); /* * Add an entry to the hashmap. If an entry with a matching key already * exists and has a data pointer associated with it, the existing data * pointer is returned, instead of assigning the new value. Compare * the return value with the data passed in to determine if a new entry was * created. Returns NULL if memory allocation failed. */ void *hashmap_put(struct hashmap *map, const void *key, void *data); /* * Return the data pointer, or NULL if no entry exists. */ void *hashmap_get(const struct hashmap *map, const void *key); /* * Remove an entry with the specified key from the map. * Returns the data pointer, or NULL, if no entry was found. */ void *hashmap_remove(struct hashmap *map, const void *key); /* * Remove all entries. */ void hashmap_clear(struct hashmap *map); /* * Remove all entries and reset the hash table to its initial size. */ void hashmap_reset(struct hashmap *map); /* * Return the number of entries in the hash map. */ size_t hashmap_size(const struct hashmap *map); /* * Get a new hashmap iterator. The iterator is an opaque * pointer that may be used with hashmap_iter_*() functions. * Hashmap iterators are INVALID after a put or remove operation is performed. * hashmap_iter_remove() allows safe removal during iteration. */ struct hashmap_iter *hashmap_iter(const struct hashmap *map); /* * Return an iterator to the next hashmap entry. Returns NULL if there are * no more entries. */ struct hashmap_iter *hashmap_iter_next(const struct hashmap *map, const struct hashmap_iter *iter); /* * Remove the hashmap entry pointed to by this iterator and returns an * iterator to the next entry. Returns NULL if there are no more entries. */ struct hashmap_iter *hashmap_iter_remove(struct hashmap *map, const struct hashmap_iter *iter); /* * Return the key of the entry pointed to by the iterator. */ const void *hashmap_iter_get_key(const struct hashmap_iter *iter); /* * Return the data of the entry pointed to by the iterator. */ void *hashmap_iter_get_data(const struct hashmap_iter *iter); /* * Set the data pointer of the entry pointed to by the iterator. */ void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data); /* * Invoke func for each entry in the hashmap. Unlike the hashmap_iter_*() * interface, this function supports calls to hashmap_remove() during iteration. * However, it is an error to put or remove an entry other than the current one, * and doing so will immediately halt iteration and return an error. * Iteration is stopped if func returns non-zero. Returns func's return * value if it is < 0, otherwise, 0. */ int hashmap_foreach(const struct hashmap *map, int (*func)(const void *, void *, void *), void *arg); /* * Default hash function for string keys. * This is an implementation of the well-documented Jenkins one-at-a-time * hash function. */ size_t hashmap_hash_string(const void *key); /* * Default key comparator function for string keys. */ int hashmap_compare_string(const void *a, const void *b); /* * Default key allocation function for string keys. Use free() for the * key_free_func. */ void *hashmap_alloc_key_string(const void *key); #ifdef HASHMAP_METRICS /* * Return the load factor. */ double hashmap_load_factor(const struct hashmap *map); /* * Return the average number of collisions per entry. */ double hashmap_collisions_mean(const struct hashmap *map); /* * Return the variance between entry collisions. 
The higher the variance, * the more likely the hash function is poor and is resulting in clustering. */ double hashmap_collisions_variance(const struct hashmap *map); #endif #endif /* __HASHMAP_H__ */ prometheus-client-mmap-v0.23.1/vendor/c/hashmap/test/000077500000000000000000000000001442423076500225105ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/hashmap/test/Makefile000066400000000000000000000005731442423076500241550ustar00rootroot00000000000000TOP_DIR := $(CURDIR)/.. SRC_DIR := $(TOP_DIR)/src TARGET_EXEC := hashmap_test SOURCES := $(SRC_DIR)/hashmap.c hashmap_test.c CFLAGS += -O0 -g -ggdb -Wall -Wunused -Werror -I$(SRC_DIR) -DHASHMAP_METRICS OBJS := $(SOURCES:%.c=%.o) test: $(TARGET_EXEC) $(TARGET_EXEC): $(OBJS) @echo EXEC $@ ; \ $(CC) -o $@ $(OBJS) clean: rm -f $(OBJS) $(TARGET_EXEC) .PHONY: test clean prometheus-client-mmap-v0.23.1/vendor/c/hashmap/test/hashmap_test.c000066400000000000000000000314651442423076500253430ustar00rootroot00000000000000/* * Copyright (c) 2016-2017 David Leeds * * Hashmap is free software; you can redistribute it and/or modify * it under the terms of the MIT license. See LICENSE for details. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <stdbool.h> #include <stdint.h> #include <time.h> #include <assert.h> #include <hashmap.h> #define ARRAY_LEN(array) (sizeof(array) / sizeof(array[0])) #define TEST_NUM_KEYS 196607 /* Results in max load factor */ #define TEST_KEY_STR_LEN 32 void **keys_str_random; void **keys_str_sequential; void **keys_int_random; void **keys_int_sequential; struct hashmap str_map; struct hashmap int_map; struct test { const char *name; const char *description; bool (*run)(struct hashmap *map, void **keys); bool pre_load; }; /* * Test type-specific generation macros */ HASHMAP_FUNCS_DECLARE(test, const void, void) HASHMAP_FUNCS_CREATE(test, const void, void) uint64_t test_time_us(void) { struct timespec now; if (clock_gettime(CLOCK_MONOTONIC, &now)) { assert(0); } return ((uint64_t)now.tv_sec) * 1000000 + (uint64_t)(now.tv_nsec / 1000); } void **test_keys_alloc(size_t num) { void **keys; keys = (void **)calloc(num, sizeof(void *)); if (!keys) { printf("malloc failed\n"); exit(1); } return keys; } void *test_key_alloc_random_str(void) { size_t i; unsigned num; char *key; key = (char *)malloc(TEST_KEY_STR_LEN + 1); if (!key) { printf("malloc failed\n"); exit(1); } for (i = 0; i < TEST_KEY_STR_LEN; ++i) { num = random(); num = (num % 96) + 32; /* ASCII printable only */ key[i] = (char)num; } key[TEST_KEY_STR_LEN] = '\0'; return key; } void *test_key_alloc_random_int(void) { uint64_t *key; key = (uint64_t *)malloc(sizeof(*key)); if (!key) { printf("malloc failed\n"); exit(1); } /* RAND_MAX is not guaranteed to be more than 32K */ *key = (uint64_t)(random() & 0xffff) << 48 | (uint64_t)(random() & 0xffff) << 32 | (uint64_t)(random() & 0xffff) << 16 | (uint64_t)(random() & 0xffff); return key; } void *test_key_alloc_sequential_str(size_t index) { char *key; key = (char *)malloc(TEST_KEY_STR_LEN + 1); if (!key) { printf("malloc failed\n"); exit(1); } snprintf(key, TEST_KEY_STR_LEN + 1, "sequential key!
%010zu", index); return key; } void *test_key_alloc_sequential_int(size_t index) { uint64_t *key; key = (uint64_t *)malloc(sizeof(*key)); if (!key) { printf("malloc failed\n"); exit(1); } *key = index; return key; } void test_keys_generate(void) { size_t i; srandom(99); /* Use reproducible random sequences */ keys_str_random = test_keys_alloc(TEST_NUM_KEYS + 1); keys_str_sequential = test_keys_alloc(TEST_NUM_KEYS + 1); keys_int_random = test_keys_alloc(TEST_NUM_KEYS + 1); keys_int_sequential = test_keys_alloc(TEST_NUM_KEYS + 1); for (i = 0; i < TEST_NUM_KEYS; ++i) { keys_str_random[i] = test_key_alloc_random_str(); keys_str_sequential[i] = test_key_alloc_sequential_str(i); keys_int_random[i] = test_key_alloc_random_int(); keys_int_sequential[i] = test_key_alloc_sequential_int(i); } keys_str_random[i] = NULL; keys_str_sequential[i] = NULL; keys_int_random[i] = NULL; keys_int_sequential[i] = NULL; } void test_load_keys(struct hashmap *map, void **keys) { void **key; for (key = keys; *key; ++key) { if (!test_hashmap_put(map, *key, *key)) { printf("hashmap_put() failed"); exit(1); } } } void test_reset_map(struct hashmap *map) { hashmap_reset(map); } void test_print_stats(struct hashmap *map, const char *label) { printf("Hashmap stats: %s\n", label); printf(" # entries: %zu\n", hashmap_size(map)); printf(" Table size: %zu\n", map->table_size); printf(" Load factor: %.4f\n", hashmap_load_factor(map)); printf(" Collisions mean: %.4f\n", hashmap_collisions_mean(map)); printf(" Collisions variance: %.4f\n", hashmap_collisions_variance(map)); } bool test_run(struct hashmap *map, void **keys, const struct test *t) { bool success; uint64_t time_us; assert(t != NULL); assert(t->name != NULL); assert(t->run != NULL); if (t->pre_load) { printf("Pre-loading keys..."); test_load_keys(map, keys); printf("done\n"); } printf("Running...\n"); time_us = test_time_us(); success = t->run(map, keys); time_us = test_time_us() - time_us; if (success) { printf("Completed successfully\n"); } else { printf("Failed\n"); } printf("Run time: %llu microseconds\n", (long long unsigned)time_us); test_print_stats(map, t->name); test_reset_map(map); return success; } bool test_run_all(struct hashmap *map, void **keys, const struct test *tests, size_t num_tests, const char *env) { const struct test *t; size_t num_failed = 0; printf("\n**************************************************\n"); printf("Starting test series:\n"); printf(" %s\n", env); printf("**************************************************\n\n"); for (t = tests; t < &tests[num_tests]; ++t) { printf("\n**************************************************" "\n"); printf("Test %02u: %s\n", (unsigned)(t - tests) + 1, t->name); if (t->description) { printf(" Description: %s\n", t->description); } printf("\n"); if (!test_run(map, keys, t)) { ++num_failed; } } printf("\n**************************************************\n"); printf("Test results:\n"); printf(" Passed: %zu\n", num_tests - num_failed); printf(" Failed: %zu\n", num_failed); printf("**************************************************\n"); return (num_failed == 0); } size_t test_hash_uint64(const void *key) { const uint8_t *byte = (const uint8_t *)key; uint8_t i; size_t hash = 0; for (i = 0; i < sizeof(uint64_t); ++i, ++byte) { hash += *byte; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); return hash; } int test_compare_uint64(const void *a, const void *b) { return *(int64_t *)a - *(int64_t *)b; } bool test_put(struct hashmap *map, void **keys) { 
void **key; void *data; for (key = keys; *key; ++key) { data = test_hashmap_put(map, *key, *key); if (!data) { printf("malloc failed\n"); exit(1); } if (data != *key) { printf("duplicate key found\n"); return false; } } return true; } bool test_put_existing(struct hashmap *map, void **keys) { void **key; void *data; int temp_data = 99; for (key = keys; *key; ++key) { data = hashmap_put(map, *key, &temp_data); if (!data) { printf("malloc failed\n"); exit(1); } if (data != *key) { printf("did not return existing data\n"); return false; } } return true; } bool test_get(struct hashmap *map, void **keys) { void **key; void *data; for (key = keys; *key; ++key) { data = test_hashmap_get(map, *key); if (!data) { printf("entry not found\n"); return false; } if (data != *key) { printf("got wrong entry\n"); return false; } } return true; } bool test_get_nonexisting(struct hashmap *map, void **keys) { void **key; void *data; const char *fake_key = "test_get_nonexisting fake key!"; for (key = keys; *key; ++key) { data = hashmap_get(map, fake_key); if (data) { printf("unexpected entry found\n"); return false; } } return true; } bool test_remove(struct hashmap *map, void **keys) { void **key; void *data; for (key = keys; *key; ++key) { data = test_hashmap_remove(map, *key); if (!data) { printf("entry not found\n"); return false; } if (data != *key) { printf("removed wrong entry\n"); return false; } } return true; } bool test_put_remove(struct hashmap *map, void **keys) { size_t i = 0; void **key; void *data; if (!test_put(map, keys)) { return false; } for (key = keys; *key; ++key) { if (i++ >= TEST_NUM_KEYS / 2) { break; } data = test_hashmap_remove(map, *key); if (!data) { printf("key not found\n"); return false; } if (data != *key) { printf("removed wrong entry\n"); return false; } } test_print_stats(map, "test_put_remove done"); i = 0; for (key = keys; *key; ++key) { if (i++ >= TEST_NUM_KEYS / 2) { break; } data = test_hashmap_put(map, *key, *key); if (!data) { printf("malloc failed\n"); exit(1); } if (data != *key) { printf("duplicate key found\n"); return false; } } return true; } bool test_iterate(struct hashmap *map, void **keys) { size_t i = 0; struct hashmap_iter *iter = hashmap_iter(map); for (; iter; iter = hashmap_iter_next(map, iter)) { ++i; } if (i != TEST_NUM_KEYS) { printf("did not iterate through all entries: " "observed %zu, expected %u\n", i, TEST_NUM_KEYS); return false; } return true; } bool test_iterate_remove(struct hashmap *map, void **keys) { size_t i = 0; struct hashmap_iter *iter = hashmap_iter(map); const void *key; while (iter) { ++i; key = test_hashmap_iter_get_key(iter); if (test_hashmap_get(map, key) != key) { printf("invalid iterator on entry #%zu\n", i); return false; } iter = hashmap_iter_remove(map, iter); if (test_hashmap_get(map, key) != NULL) { printf("iter_remove failed on entry #%zu\n", i); return false; } } if (i != TEST_NUM_KEYS) { printf("did not iterate through all entries: " "observed %zu, expected %u\n", i, TEST_NUM_KEYS); return false; } return true; } struct test_foreach_arg { struct hashmap *map; size_t i; }; int test_foreach_callback(const void *key, void *data, void *arg) { struct test_foreach_arg *state = (struct test_foreach_arg *)arg; if (state->i & 1) { /* Remove every other key */ if (!test_hashmap_remove(state->map, key)) { printf("could not remove expected key\n"); return -1; } } ++state->i; return 0; } bool test_foreach(struct hashmap *map, void **keys) { struct test_foreach_arg arg = { map, 1 }; size_t size = hashmap_size(map); if 
(test_hashmap_foreach(map, test_foreach_callback, &arg) < 0) { return false; } if (hashmap_size(map) != size / 2) { printf("foreach delete did not remove expected # of entries: " "contains %zu vs. expected %zu\n", hashmap_size(map), size / 2); return false; } return true; } bool test_clear(struct hashmap *map, void **keys) { hashmap_clear(map); return true; } bool test_reset(struct hashmap *map, void **keys) { hashmap_reset(map); return true; } const struct test tests[] = { { .name = "put performance", .description = "put new hash keys", .run = test_put }, { .name = "put existing performance", .description = "attempt to put existing hash keys", .run = test_put_existing, .pre_load = true }, { .name = "get existing performance", .description = "get existing hash keys", .run = test_get, .pre_load = true }, { .name = "get non-existing performance", .description = "get nonexistent hash keys", .run = test_get_nonexisting, .pre_load = true }, { .name = "remove performance", .description = "remove hash keys", .run = test_remove, .pre_load = true }, { .name = "mixed put/remove performance", .description = "put, remove 1/2, then put them back", .run = test_put_remove }, { .name = "iterate performance", .description = "iterate through entries", .run = test_iterate, .pre_load = true }, { .name = "iterate remove all", .description = "iterate and remove all entries", .run = test_iterate_remove, .pre_load = true }, { .name = "removal in foreach", .description = "iterate and delete 1/2 using hashmap_foreach", .run = test_foreach, .pre_load = true }, { .name = "clear performance", .description = "clear entries", .run = test_clear, .pre_load = true }, { .name = "reset performance", .description = "reset entries", .run = test_reset, .pre_load = true } }; /* * Main function */ int main(int argc, char **argv) { bool success = true; /* Initialize */ printf("Initializing hash maps..."); if (hashmap_init(&str_map, hashmap_hash_string, hashmap_compare_string, 0) < 0) { success = false; } /* hashmap_set_key_alloc_funcs(&str_map, hashmap_alloc_key_string, free); */ if (hashmap_init(&int_map, test_hash_uint64, test_compare_uint64, 0) < 0) { success = false; } printf("done\n"); if (!success) { printf("Hashmap init failed\n"); return 1; } printf("Generating %u test keys...", TEST_NUM_KEYS); test_keys_generate(); printf("done\n"); printf("Running tests\n\n"); success &= test_run_all(&str_map, keys_str_random, tests, ARRAY_LEN(tests), "Hashmap w/randomized string keys"); success &= test_run_all(&str_map, keys_str_sequential, tests, ARRAY_LEN(tests), "Hashmap w/sequential string keys"); success &= test_run_all(&int_map, keys_int_random, tests, ARRAY_LEN(tests), "Hashmap w/randomized integer keys"); success &= test_run_all(&int_map, keys_int_sequential, tests, ARRAY_LEN(tests), "Hashmap w/sequential integer keys"); printf("\nTests finished\n"); hashmap_destroy(&str_map); hashmap_destroy(&int_map); if (!success) { printf("Tests FAILED\n"); exit(1); } return 0; } prometheus-client-mmap-v0.23.1/vendor/c/jsmn/000077500000000000000000000000001442423076500210575ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/jsmn/.travis.yml000066400000000000000000000000561442423076500231710ustar00rootroot00000000000000language: c sudo: false script: - make test prometheus-client-mmap-v0.23.1/vendor/c/jsmn/LICENSE000066400000000000000000000020451442423076500220650ustar00rootroot00000000000000Copyright (c) 2010 Serge A.
Zaitsev Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. prometheus-client-mmap-v0.23.1/vendor/c/jsmn/Makefile000066400000000000000000000016021442423076500225160ustar00rootroot00000000000000# You can put your build options here -include config.mk all: libjsmn.a libjsmn.a: jsmn.o $(AR) rc $@ $^ %.o: %.c jsmn.h $(CC) -c $(CFLAGS) $< -o $@ test: test_default test_strict test_links test_strict_links test_default: test/tests.c $(CC) $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ test_strict: test/tests.c $(CC) -DJSMN_STRICT=1 $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ test_links: test/tests.c $(CC) -DJSMN_PARENT_LINKS=1 $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ test_strict_links: test/tests.c $(CC) -DJSMN_STRICT=1 -DJSMN_PARENT_LINKS=1 $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ jsmn_test.o: jsmn_test.c libjsmn.a simple_example: example/simple.o libjsmn.a $(CC) $(LDFLAGS) $^ -o $@ jsondump: example/jsondump.o libjsmn.a $(CC) $(LDFLAGS) $^ -o $@ clean: rm -f *.o example/*.o rm -f *.a *.so rm -f simple_example rm -f jsondump .PHONY: all clean test prometheus-client-mmap-v0.23.1/vendor/c/jsmn/README.md000066400000000000000000000127571442423076500223500ustar00rootroot00000000000000JSMN ==== [![Build Status](https://travis-ci.org/zserge/jsmn.svg?branch=master)](https://travis-ci.org/zserge/jsmn) jsmn (pronounced like 'jasmine') is a minimalistic JSON parser in C. It can be easily integrated into resource-limited or embedded projects. You can find more information about the JSON format at [json.org][1]. Library sources are available at https://github.com/zserge/jsmn The web page with some information about jsmn can be found at [http://zserge.com/jsmn.html][2] Philosophy ---------- Most JSON parsers offer you a bunch of functions to load JSON data, parse it and extract any value by its name. jsmn proves that checking the correctness of every JSON packet or allocating temporary objects to store parsed JSON fields is often overkill. The JSON format itself is extremely simple, so why should we complicate it? jsmn is designed to be **robust** (it should work fine even with erroneous data), **fast** (it should parse data on the fly), **portable** (no superfluous dependencies or non-standard C extensions). And of course, **simplicity** is a key feature - simple code style, simple algorithm, simple integration into other projects. Features -------- * compatible with C89 * no dependencies (even libc!)
* highly portable (tested on x86/amd64, ARM, AVR) * about 200 lines of code * extremely small code footprint * API contains only 2 functions * no dynamic memory allocation * incremental single-pass parsing * library code is covered with unit-tests Design ------ The rudimentary jsmn object is a **token**. Let's consider a JSON string: '{ "name" : "Jack", "age" : 27 }' It holds the following tokens: * Object: `{ "name" : "Jack", "age" : 27}` (the whole object) * Strings: `"name"`, `"Jack"`, `"age"` (keys and some values) * Number: `27` In jsmn, tokens do not hold any data, but point to token boundaries in the JSON string instead. In the example above, jsmn will create tokens like: Object [0..31], String [3..7], String [12..16], String [20..23], Number [27..29]. Every jsmn token has a type, which indicates the type of the corresponding JSON token. jsmn supports the following token types: * Object - a container of key-value pairs, e.g.: `{ "foo":"bar", "x":0.3 }` * Array - a sequence of values, e.g.: `[ 1, 2, 3 ]` * String - a quoted sequence of chars, e.g.: `"foo"` * Primitive - a number, a boolean (`true`, `false`) or `null` Besides start/end positions, jsmn tokens for complex types (like arrays or objects) also contain the number of child items, so you can easily follow the object hierarchy. This approach provides enough information for parsing any JSON data and makes it possible to use zero-copy techniques. Install ------- To clone the repository you should have Git installed. Just run: $ git clone https://github.com/zserge/jsmn Repository layout is simple: jsmn.c and jsmn.h are the library files; tests are in jsmn\_test.c, and you will also find README, LICENSE and Makefile files inside. To build the library, run `make`. It is also recommended to run `make test`. Let me know if any tests fail. If the build was successful, you should get a `libjsmn.a` library. The header file you should include is called `"jsmn.h"`. API --- Token types are described by `jsmntype_t`: typedef enum { JSMN_UNDEFINED = 0, JSMN_OBJECT = 1, JSMN_ARRAY = 2, JSMN_STRING = 3, JSMN_PRIMITIVE = 4 } jsmntype_t; **Note:** Unlike JSON data types, primitive tokens are not divided into numbers, booleans and null, because one can easily tell the type using the first character: * 't', 'f' - boolean * 'n' - null * '-', '0'..'9' - number Token is an object of `jsmntok_t` type: typedef struct { jsmntype_t type; // Token type int start; // Token start position int end; // Token end position int size; // Number of child (nested) tokens } jsmntok_t; **Note:** string tokens point to the first character after the opening quote and end at the character just before the closing quote. This was done to simplify string extraction from JSON data. All the work is done by the `jsmn_parser` object. You can initialize a new parser using: jsmn_parser parser; jsmntok_t tokens[10]; jsmn_init(&parser); // js - pointer to JSON string // tokens - an array of tokens available // 10 - number of tokens available jsmn_parse(&parser, js, strlen(js), tokens, 10); This creates a parser, which then tries to parse up to 10 JSON tokens from the `js` string. A non-negative return value of `jsmn_parse` is the number of tokens actually used by the parser. Passing NULL instead of the tokens array means parsing results are not stored; instead, the function returns the number of tokens needed to parse the given string. This can be useful if you don't yet know how many tokens to allocate.
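To make that two-pass pattern concrete, here is a short sketch (an illustrative addition, not from the upstream README; the parse_all() helper name is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "jsmn.h"

int parse_all(const char *js)
{
    jsmn_parser p;
    jsmntok_t *tok;
    int n;

    /* First pass: no token array, so jsmn_parse() only counts tokens */
    jsmn_init(&p);
    n = jsmn_parse(&p, js, strlen(js), NULL, 0);
    if (n < 0) {
        return n; /* JSMN_ERROR_INVAL or JSMN_ERROR_PART */
    }

    tok = malloc(n * sizeof(*tok));
    if (tok == NULL) {
        return JSMN_ERROR_NOMEM;
    }

    /* Second pass: reset the parser position, then parse for real */
    jsmn_init(&p);
    n = jsmn_parse(&p, js, strlen(js), tok, n);
    printf("parsed %d tokens\n", n);
    free(tok);
    return n;
}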
If something goes wrong, you will get an error. The error will be one of these: * `JSMN_ERROR_INVAL` - bad token, JSON string is corrupted * `JSMN_ERROR_NOMEM` - not enough tokens, JSON string is too large * `JSMN_ERROR_PART` - JSON string is too short, expecting more JSON data If you get `JSMN_ERROR_NOMEM`, you can re-allocate more tokens and call `jsmn_parse` once more. If you read JSON data from a stream, you can periodically call `jsmn_parse` and check if the return value is `JSMN_ERROR_PART`; you will get this error until you reach the end of the JSON data. Other info ---------- This software is distributed under [MIT license](http://www.opensource.org/licenses/mit-license.php), so feel free to integrate it in your commercial products. [1]: http://www.json.org/ [2]: http://zserge.com/jsmn.html prometheus-client-mmap-v0.23.1/vendor/c/jsmn/example/000077500000000000000000000000001442423076500225125ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/jsmn/example/jsondump.c000066400000000000000000000054171442423076500245240ustar00rootroot00000000000000#include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <unistd.h> #include "../jsmn.h" /* Function realloc_it() is a wrapper function for standard realloc() * with one difference - it frees the old memory pointer in case of realloc * failure. Thus, DO NOT use the old data pointer in any way after a call to * realloc_it(). If your code has some kind of fallback algorithm if * memory can't be re-allocated - use standard realloc() instead. */ static inline void *realloc_it(void *ptrmem, size_t size) { void *p = realloc(ptrmem, size); if (!p) { free (ptrmem); fprintf(stderr, "realloc(): errno=%d\n", errno); } return p; } /* * An example of reading JSON from stdin and printing its content to stdout. * The output looks like YAML, but I'm not sure if it's really compatible. */ static int dump(const char *js, jsmntok_t *t, size_t count, int indent) { int i, j, k; if (count == 0) { return 0; } if (t->type == JSMN_PRIMITIVE) { printf("%.*s", t->end - t->start, js+t->start); return 1; } else if (t->type == JSMN_STRING) { printf("'%.*s'", t->end - t->start, js+t->start); return 1; } else if (t->type == JSMN_OBJECT) { printf("\n"); j = 0; for (i = 0; i < t->size; i++) { for (k = 0; k < indent; k++) printf(" "); j += dump(js, t+1+j, count-j, indent+1); printf(": "); j += dump(js, t+1+j, count-j, indent+1); printf("\n"); } return j+1; } else if (t->type == JSMN_ARRAY) { j = 0; printf("\n"); for (i = 0; i < t->size; i++) { for (k = 0; k < indent-1; k++) printf(" "); printf(" - "); j += dump(js, t+1+j, count-j, indent+1); printf("\n"); } return j+1; } return 0; } int main() { int r; int eof_expected = 0; char *js = NULL; size_t jslen = 0; char buf[BUFSIZ]; jsmn_parser p; jsmntok_t *tok; size_t tokcount = 2; /* Prepare parser */ jsmn_init(&p); /* Allocate some tokens as a start */ tok = malloc(sizeof(*tok) * tokcount); if (tok == NULL) { fprintf(stderr, "malloc(): errno=%d\n", errno); return 3; } for (;;) { /* Read another chunk */ r = fread(buf, 1, sizeof(buf), stdin); if (r < 0) { fprintf(stderr, "fread(): %d, errno=%d\n", r, errno); return 1; } if (r == 0) { if (eof_expected != 0) { return 0; } else { fprintf(stderr, "fread(): unexpected EOF\n"); return 2; } } js = realloc_it(js, jslen + r + 1); if (js == NULL) { return 3; } strncpy(js + jslen, buf, r); jslen = jslen + r; again: r = jsmn_parse(&p, js, jslen, tok, tokcount); if (r < 0) { if (r == JSMN_ERROR_NOMEM) { tokcount = tokcount * 2; tok = realloc_it(tok, sizeof(*tok) * tokcount); if (tok == NULL) { return 3; } goto again; } } else { dump(js, tok, p.toknext, 0);
eof_expected = 1; } } return EXIT_SUCCESS; } prometheus-client-mmap-v0.23.1/vendor/c/jsmn/example/simple.c000066400000000000000000000042441442423076500241530ustar00rootroot00000000000000#include <stdio.h> #include <stdlib.h> #include <string.h> #include "../jsmn.h" /* * A small example of jsmn parsing when the JSON structure is known and the number of * tokens is predictable. */ static const char *JSON_STRING = "{\"user\": \"johndoe\", \"admin\": false, \"uid\": 1000,\n " "\"groups\": [\"users\", \"wheel\", \"audio\", \"video\"]}"; static int jsoneq(const char *json, jsmntok_t *tok, const char *s) { if (tok->type == JSMN_STRING && (int) strlen(s) == tok->end - tok->start && strncmp(json + tok->start, s, tok->end - tok->start) == 0) { return 0; } return -1; } int main() { int i; int r; jsmn_parser p; jsmntok_t t[128]; /* We expect no more than 128 tokens */ jsmn_init(&p); r = jsmn_parse(&p, JSON_STRING, strlen(JSON_STRING), t, sizeof(t)/sizeof(t[0])); if (r < 0) { printf("Failed to parse JSON: %d\n", r); return 1; } /* Assume the top-level element is an object */ if (r < 1 || t[0].type != JSMN_OBJECT) { printf("Object expected\n"); return 1; } /* Loop over all keys of the root object */ for (i = 1; i < r; i++) { if (jsoneq(JSON_STRING, &t[i], "user") == 0) { /* We may use strndup() to fetch string value */ printf("- User: %.*s\n", t[i+1].end-t[i+1].start, JSON_STRING + t[i+1].start); i++; } else if (jsoneq(JSON_STRING, &t[i], "admin") == 0) { /* We may additionally check if the value is either "true" or "false" */ printf("- Admin: %.*s\n", t[i+1].end-t[i+1].start, JSON_STRING + t[i+1].start); i++; } else if (jsoneq(JSON_STRING, &t[i], "uid") == 0) { /* We may want to do strtol() here to get numeric value */ printf("- UID: %.*s\n", t[i+1].end-t[i+1].start, JSON_STRING + t[i+1].start); i++; } else if (jsoneq(JSON_STRING, &t[i], "groups") == 0) { int j; printf("- Groups:\n"); if (t[i+1].type != JSMN_ARRAY) { continue; /* We expect groups to be an array of strings */ } for (j = 0; j < t[i+1].size; j++) { jsmntok_t *g = &t[i+j+2]; printf(" * %.*s\n", g->end - g->start, JSON_STRING + g->start); } i += t[i+1].size + 1; } else { printf("Unexpected key: %.*s\n", t[i].end-t[i].start, JSON_STRING + t[i].start); } } return EXIT_SUCCESS; } prometheus-client-mmap-v0.23.1/vendor/c/jsmn/jsmn.c000066400000000000000000000172531442423076500222020ustar00rootroot00000000000000#include "jsmn.h" /** * Allocates a fresh unused token from the token pool. */ static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *tok; if (parser->toknext >= num_tokens) { return NULL; } tok = &tokens[parser->toknext++]; tok->start = tok->end = -1; tok->size = 0; #ifdef JSMN_PARENT_LINKS tok->parent = -1; #endif return tok; } /** * Fills token type and boundaries. */ static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type, int start, int end) { token->type = type; token->start = start; token->end = end; token->size = 0; } /** * Fills next available token with JSON primitive.
*/ static int jsmn_parse_primitive(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *token; int start; start = parser->pos; for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { switch (js[parser->pos]) { #ifndef JSMN_STRICT /* In strict mode primitive must be followed by "," or "}" or "]" */ case ':': #endif case '\t' : case '\r' : case '\n' : case ' ' : case ',' : case ']' : case '}' : goto found; } if (js[parser->pos] < 32 || js[parser->pos] >= 127) { parser->pos = start; return JSMN_ERROR_INVAL; } } #ifdef JSMN_STRICT /* In strict mode primitive must be followed by a comma/object/array */ parser->pos = start; return JSMN_ERROR_PART; #endif found: if (tokens == NULL) { parser->pos--; return 0; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) { parser->pos = start; return JSMN_ERROR_NOMEM; } jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos); #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif parser->pos--; return 0; } /** * Fills next token with JSON string. */ static int jsmn_parse_string(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *token; int start = parser->pos; parser->pos++; /* Skip starting quote */ for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { char c = js[parser->pos]; /* Quote: end of string */ if (c == '\"') { if (tokens == NULL) { return 0; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) { parser->pos = start; return JSMN_ERROR_NOMEM; } jsmn_fill_token(token, JSMN_STRING, start+1, parser->pos); #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif return 0; } /* Backslash: Quoted symbol expected */ if (c == '\\' && parser->pos + 1 < len) { int i; parser->pos++; switch (js[parser->pos]) { /* Allowed escaped symbols */ case '\"': case '/' : case '\\' : case 'b' : case 'f' : case 'r' : case 'n' : case 't' : break; /* Allows escaped symbol \uXXXX */ case 'u': parser->pos++; for(i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0'; i++) { /* If it isn't a hex character we have an error */ if(!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */ (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */ (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */ parser->pos = start; return JSMN_ERROR_INVAL; } parser->pos++; } parser->pos--; break; /* Unexpected symbol */ default: parser->pos = start; return JSMN_ERROR_INVAL; } } } parser->pos = start; return JSMN_ERROR_PART; } /** * Parse JSON string and fill tokens. */ int jsmn_parse(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, unsigned int num_tokens) { int r; int i; jsmntok_t *token; int count = parser->toknext; for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { char c; jsmntype_t type; c = js[parser->pos]; switch (c) { case '{': case '[': count++; if (tokens == NULL) { break; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) return JSMN_ERROR_NOMEM; if (parser->toksuper != -1) { tokens[parser->toksuper].size++; #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif } token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY); token->start = parser->pos; parser->toksuper = parser->toknext - 1; break; case '}': case ']': if (tokens == NULL) break; type = (c == '}' ? 
JSMN_OBJECT : JSMN_ARRAY); #ifdef JSMN_PARENT_LINKS if (parser->toknext < 1) { return JSMN_ERROR_INVAL; } token = &tokens[parser->toknext - 1]; for (;;) { if (token->start != -1 && token->end == -1) { if (token->type != type) { return JSMN_ERROR_INVAL; } token->end = parser->pos + 1; parser->toksuper = token->parent; break; } if (token->parent == -1) { if(token->type != type || parser->toksuper == -1) { return JSMN_ERROR_INVAL; } break; } token = &tokens[token->parent]; } #else for (i = parser->toknext - 1; i >= 0; i--) { token = &tokens[i]; if (token->start != -1 && token->end == -1) { if (token->type != type) { return JSMN_ERROR_INVAL; } parser->toksuper = -1; token->end = parser->pos + 1; break; } } /* Error if unmatched closing bracket */ if (i == -1) return JSMN_ERROR_INVAL; for (; i >= 0; i--) { token = &tokens[i]; if (token->start != -1 && token->end == -1) { parser->toksuper = i; break; } } #endif break; case '\"': r = jsmn_parse_string(parser, js, len, tokens, num_tokens); if (r < 0) return r; count++; if (parser->toksuper != -1 && tokens != NULL) tokens[parser->toksuper].size++; break; case '\t' : case '\r' : case '\n' : case ' ': break; case ':': parser->toksuper = parser->toknext - 1; break; case ',': if (tokens != NULL && parser->toksuper != -1 && tokens[parser->toksuper].type != JSMN_ARRAY && tokens[parser->toksuper].type != JSMN_OBJECT) { #ifdef JSMN_PARENT_LINKS parser->toksuper = tokens[parser->toksuper].parent; #else for (i = parser->toknext - 1; i >= 0; i--) { if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) { if (tokens[i].start != -1 && tokens[i].end == -1) { parser->toksuper = i; break; } } } #endif } break; #ifdef JSMN_STRICT /* In strict mode primitives are: numbers and booleans */ case '-': case '0': case '1' : case '2': case '3' : case '4': case '5': case '6': case '7' : case '8': case '9': case 't': case 'f': case 'n' : /* And they must not be keys of the object */ if (tokens != NULL && parser->toksuper != -1) { jsmntok_t *t = &tokens[parser->toksuper]; if (t->type == JSMN_OBJECT || (t->type == JSMN_STRING && t->size != 0)) { return JSMN_ERROR_INVAL; } } #else /* In non-strict mode every unquoted value is a primitive */ default: #endif r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens); if (r < 0) return r; count++; if (parser->toksuper != -1 && tokens != NULL) tokens[parser->toksuper].size++; break; #ifdef JSMN_STRICT /* Unexpected char in strict mode */ default: return JSMN_ERROR_INVAL; #endif } } if (tokens != NULL) { for (i = parser->toknext - 1; i >= 0; i--) { /* Unmatched opened object or array */ if (tokens[i].start != -1 && tokens[i].end == -1) { return JSMN_ERROR_PART; } } } return count; } /** * Creates a new parser over a given buffer with an array of tokens * available. */ void jsmn_init(jsmn_parser *parser) { parser->pos = 0; parser->toknext = 0; parser->toksuper = -1; }
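One detail of the parser above worth illustrating (a sketch added here, not part of the vendored sources): when jsmn is compiled with -DJSMN_PARENT_LINKS, as the Makefile's test_links target does, every token records the index of its parent token, so the hierarchy can be walked without rescanning. A minimal sketch, assuming jsmn.c is compiled with the same flag:

/* build: cc -DJSMN_PARENT_LINKS example.c jsmn.c */
#include <stdio.h>
#include <string.h>
#include "jsmn.h"

int main(void)
{
    const char *js = "{\"a\": [1]}";
    jsmn_parser p;
    jsmntok_t t[8];
    int i, n;

    jsmn_init(&p);
    n = jsmn_parse(&p, js, strlen(js), t, 8);

    /* Each token's parent field indexes its enclosing token (-1 = root) */
    for (i = 0; i < n; i++) {
        printf("token %d: type %d, parent %d\n", i, t[i].type, t[i].parent);
    }
    return 0;
}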
prometheus-client-mmap-v0.23.1/vendor/c/jsmn/jsmn.h000066400000000000000000000031361442423076500222020ustar00rootroot00000000000000#ifndef __JSMN_H_ #define __JSMN_H_ #include <stddef.h> #ifdef __cplusplus extern "C" { #endif /** * JSON type identifier. Basic types are: * o Object * o Array * o String * o Other primitive: number, boolean (true/false) or null */ typedef enum { JSMN_UNDEFINED = 0, JSMN_OBJECT = 1, JSMN_ARRAY = 2, JSMN_STRING = 3, JSMN_PRIMITIVE = 4 } jsmntype_t; enum jsmnerr { /* Not enough tokens were provided */ JSMN_ERROR_NOMEM = -1, /* Invalid character inside JSON string */ JSMN_ERROR_INVAL = -2, /* The string is not a full JSON packet, more bytes expected */ JSMN_ERROR_PART = -3 }; /** * JSON token description. * type type (object, array, string etc.) * start start position in JSON data string * end end position in JSON data string */ typedef struct { jsmntype_t type; int start; int end; int size; #ifdef JSMN_PARENT_LINKS int parent; #endif } jsmntok_t; /** * JSON parser. Contains an array of token blocks available. Also stores * the string being parsed now and current position in that string. */ typedef struct { unsigned int pos; /* offset in the JSON string */ unsigned int toknext; /* next token to allocate */ int toksuper; /* superior token node, e.g. parent object or array */ } jsmn_parser; /** * Create JSON parser over an array of tokens */ void jsmn_init(jsmn_parser *parser); /** * Run JSON parser. It parses a JSON data string into an array of tokens, each describing * a single JSON object. */ int jsmn_parse(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, unsigned int num_tokens); #ifdef __cplusplus } #endif #endif /* __JSMN_H_ */ prometheus-client-mmap-v0.23.1/vendor/c/jsmn/library.json000066400000000000000000000005601442423076500234170ustar00rootroot00000000000000{ "name": "jsmn", "keywords": "json", "description": "Minimalistic JSON parser/tokenizer in C. It can be easily integrated into resource-limited or embedded projects", "repository": { "type": "git", "url": "https://github.com/zserge/jsmn.git" }, "frameworks": "*", "platforms": "*", "examples": [ "example/*.c" ], "exclude": "test" } prometheus-client-mmap-v0.23.1/vendor/c/jsmn/test/000077500000000000000000000000001442423076500220365ustar00rootroot00000000000000prometheus-client-mmap-v0.23.1/vendor/c/jsmn/test/test.h000066400000000000000000000010431442423076500231640ustar00rootroot00000000000000#ifndef __TEST_H__ #define __TEST_H__ static int test_passed = 0; static int test_failed = 0; /* Terminate current test with error */ #define fail() return __LINE__ /* Successful end of the test case */ #define done() return 0 /* Check single condition */ #define check(cond) do { if (!(cond)) fail(); } while (0) /* Test runner */ static void test(int (*func)(void), const char *name) { int r = func(); if (r == 0) { test_passed++; } else { test_failed++; printf("FAILED: %s (at line %d)\n", name, r); } } #endif /* __TEST_H__ */ prometheus-client-mmap-v0.23.1/vendor/c/jsmn/test/tests.c000066400000000000000000000247661442423076500233510ustar00rootroot00000000000000#include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include "test.h" #include "testutil.h" int test_empty(void) { check(parse("{}", 1, 1, JSMN_OBJECT, 0, 2, 0)); check(parse("[]", 1, 1, JSMN_ARRAY, 0, 2, 0)); check(parse("[{},{}]", 3, 3, JSMN_ARRAY, 0, 7, 2, JSMN_OBJECT, 1, 3, 0, JSMN_OBJECT, 4, 6, 0)); return 0; } int test_object(void) { check(parse("{\"a\":0}", 3, 3, JSMN_OBJECT, 0, 7, 1, JSMN_STRING, "a", 1, JSMN_PRIMITIVE, "0")); check(parse("{\"a\":[]}", 3, 3, JSMN_OBJECT, 0, 8, 1, JSMN_STRING, "a", 1, JSMN_ARRAY, 5, 7, 0)); check(parse("{\"a\":{},\"b\":{}}", 5, 5, JSMN_OBJECT, -1, -1, 2, JSMN_STRING, "a", 1, JSMN_OBJECT, -1, -1, 0, JSMN_STRING, "b", 1, JSMN_OBJECT, -1, -1, 0)); check(parse("{\n
\"Day\": 26,\n \"Month\": 9,\n \"Year\": 12\n }", 7, 7, JSMN_OBJECT, -1, -1, 3, JSMN_STRING, "Day", 1, JSMN_PRIMITIVE, "26", JSMN_STRING, "Month", 1, JSMN_PRIMITIVE, "9", JSMN_STRING, "Year", 1, JSMN_PRIMITIVE, "12")); check(parse("{\"a\": 0, \"b\": \"c\"}", 5, 5, JSMN_OBJECT, -1, -1, 2, JSMN_STRING, "a", 1, JSMN_PRIMITIVE, "0", JSMN_STRING, "b", 1, JSMN_STRING, "c", 0)); #ifdef JSMN_STRICT check(parse("{\"a\"\n0}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\", 0}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\": {2}}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\": {2: 3}}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\": {\"a\": 2 3}}", JSMN_ERROR_INVAL, 5)); /* FIXME */ /*check(parse("{\"a\"}", JSMN_ERROR_INVAL, 2));*/ /*check(parse("{\"a\": 1, \"b\"}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{\"a\",\"b\":1}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{\"a\":1,}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{\"a\":\"b\":\"c\"}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{,}", JSMN_ERROR_INVAL, 4));*/ #endif return 0; } int test_array(void) { /* FIXME */ /*check(parse("[10}", JSMN_ERROR_INVAL, 3));*/ /*check(parse("[1,,3]", JSMN_ERROR_INVAL, 3)*/ check(parse("[10]", 2, 2, JSMN_ARRAY, -1, -1, 1, JSMN_PRIMITIVE, "10")); check(parse("{\"a\": 1]", JSMN_ERROR_INVAL, 3)); /* FIXME */ /*check(parse("[\"a\": 1]", JSMN_ERROR_INVAL, 3));*/ return 0; } int test_primitive(void) { check(parse("{\"boolVar\" : true }", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "boolVar", 1, JSMN_PRIMITIVE, "true")); check(parse("{\"boolVar\" : false }", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "boolVar", 1, JSMN_PRIMITIVE, "false")); check(parse("{\"nullVar\" : null }", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "nullVar", 1, JSMN_PRIMITIVE, "null")); check(parse("{\"intVar\" : 12}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "intVar", 1, JSMN_PRIMITIVE, "12")); check(parse("{\"floatVar\" : 12.345}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "floatVar", 1, JSMN_PRIMITIVE, "12.345")); return 0; } int test_string(void) { check(parse("{\"strVar\" : \"hello world\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "strVar", 1, JSMN_STRING, "hello world", 0)); check(parse("{\"strVar\" : \"escapes: \\/\\r\\n\\t\\b\\f\\\"\\\\\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "strVar", 1, JSMN_STRING, "escapes: \\/\\r\\n\\t\\b\\f\\\"\\\\", 0)); check(parse("{\"strVar\": \"\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "strVar", 1, JSMN_STRING, "", 0)); check(parse("{\"a\":\"\\uAbcD\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_STRING, "\\uAbcD", 0)); check(parse("{\"a\":\"str\\u0000\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_STRING, "str\\u0000", 0)); check(parse("{\"a\":\"\\uFFFFstr\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_STRING, "\\uFFFFstr", 0)); check(parse("{\"a\":[\"\\u0280\"]}", 4, 4, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_ARRAY, -1, -1, 1, JSMN_STRING, "\\u0280", 0)); check(parse("{\"a\":\"str\\uFFGFstr\"}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\":\"str\\u@FfF\"}", JSMN_ERROR_INVAL, 3)); check(parse("{{\"a\":[\"\\u028\"]}", JSMN_ERROR_INVAL, 4)); return 0; } int test_partial_string(void) { int i; int r; jsmn_parser p; jsmntok_t tok[5]; const char *js = "{\"x\": \"va\\\\ue\", \"y\": \"value y\"}"; jsmn_init(&p); for (i = 1; i <= strlen(js); i++) { r = jsmn_parse(&p, js, i, tok, sizeof(tok)/sizeof(tok[0])); if (i == strlen(js)) { check(r == 5); check(tokeq(js, tok, 5, JSMN_OBJECT, -1, -1, 2, JSMN_STRING, "x", 1, JSMN_STRING, "va\\\\ue", 0, JSMN_STRING, "y", 1, JSMN_STRING, 
"value y", 0)); } else { check(r == JSMN_ERROR_PART); } } return 0; } int test_partial_array(void) { #ifdef JSMN_STRICT int r; int i; jsmn_parser p; jsmntok_t tok[10]; const char *js = "[ 1, true, [123, \"hello\"]]"; jsmn_init(&p); for (i = 1; i <= strlen(js); i++) { r = jsmn_parse(&p, js, i, tok, sizeof(tok)/sizeof(tok[0])); if (i == strlen(js)) { check(r == 6); check(tokeq(js, tok, 6, JSMN_ARRAY, -1, -1, 3, JSMN_PRIMITIVE, "1", JSMN_PRIMITIVE, "true", JSMN_ARRAY, -1, -1, 2, JSMN_PRIMITIVE, "123", JSMN_STRING, "hello", 0)); } else { check(r == JSMN_ERROR_PART); } } #endif return 0; } int test_array_nomem(void) { int i; int r; jsmn_parser p; jsmntok_t toksmall[10], toklarge[10]; const char *js; js = " [ 1, true, [123, \"hello\"]]"; for (i = 0; i < 6; i++) { jsmn_init(&p); memset(toksmall, 0, sizeof(toksmall)); memset(toklarge, 0, sizeof(toklarge)); r = jsmn_parse(&p, js, strlen(js), toksmall, i); check(r == JSMN_ERROR_NOMEM); memcpy(toklarge, toksmall, sizeof(toksmall)); r = jsmn_parse(&p, js, strlen(js), toklarge, 10); check(r >= 0); check(tokeq(js, toklarge, 4, JSMN_ARRAY, -1, -1, 3, JSMN_PRIMITIVE, "1", JSMN_PRIMITIVE, "true", JSMN_ARRAY, -1, -1, 2, JSMN_PRIMITIVE, "123", JSMN_STRING, "hello", 0)); } return 0; } int test_unquoted_keys(void) { #ifndef JSMN_STRICT int r; jsmn_parser p; jsmntok_t tok[10]; const char *js; jsmn_init(&p); js = "key1: \"value\"\nkey2 : 123"; r = jsmn_parse(&p, js, strlen(js), tok, 10); check(r >= 0); check(tokeq(js, tok, 4, JSMN_PRIMITIVE, "key1", JSMN_STRING, "value", 0, JSMN_PRIMITIVE, "key2", JSMN_PRIMITIVE, "123")); #endif return 0; } int test_issue_22(void) { int r; jsmn_parser p; jsmntok_t tokens[128]; const char *js; js = "{ \"height\":10, \"layers\":[ { \"data\":[6,6], \"height\":10, " "\"name\":\"Calque de Tile 1\", \"opacity\":1, \"type\":\"tilelayer\", " "\"visible\":true, \"width\":10, \"x\":0, \"y\":0 }], " "\"orientation\":\"orthogonal\", \"properties\": { }, \"tileheight\":32, " "\"tilesets\":[ { \"firstgid\":1, \"image\":\"..\\/images\\/tiles.png\", " "\"imageheight\":64, \"imagewidth\":160, \"margin\":0, \"name\":\"Tiles\", " "\"properties\":{}, \"spacing\":0, \"tileheight\":32, \"tilewidth\":32 }], " "\"tilewidth\":32, \"version\":1, \"width\":10 }"; jsmn_init(&p); r = jsmn_parse(&p, js, strlen(js), tokens, 128); check(r >= 0); return 0; } int test_issue_27(void) { const char *js = "{ \"name\" : \"Jack\", \"age\" : 27 } { \"name\" : \"Anna\", "; check(parse(js, JSMN_ERROR_PART, 8)); return 0; } int test_input_length(void) { const char *js; int r; jsmn_parser p; jsmntok_t tokens[10]; js = "{\"a\": 0}garbage"; jsmn_init(&p); r = jsmn_parse(&p, js, 8, tokens, 10); check(r == 3); check(tokeq(js, tokens, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_PRIMITIVE, "0")); return 0; } int test_count(void) { jsmn_parser p; const char *js; js = "{}"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 1); js = "[]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 1); js = "[[]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 2); js = "[[], []]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 3); js = "[[], []]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 3); js = "[[], [[]], [[], []]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 7); js = "[\"a\", [[], []]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 5); js = "[[], \"[], [[]]\", [[]]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 5); js = "[1, 2, 3]"; 
jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 4); js = "[1, 2, [3, \"a\"], null]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 7); return 0; } int test_nonstrict(void) { #ifndef JSMN_STRICT const char *js; js = "a: 0garbage"; check(parse(js, 2, 2, JSMN_PRIMITIVE, "a", JSMN_PRIMITIVE, "0garbage")); js = "Day : 26\nMonth : Sep\n\nYear: 12"; check(parse(js, 6, 6, JSMN_PRIMITIVE, "Day", JSMN_PRIMITIVE, "26", JSMN_PRIMITIVE, "Month", JSMN_PRIMITIVE, "Sep", JSMN_PRIMITIVE, "Year", JSMN_PRIMITIVE, "12")); /* nested {s don't cause a parse error. */ js = "\"key {1\": 1234"; check(parse(js, 2, 2, JSMN_STRING, "key {1", 1, JSMN_PRIMITIVE, "1234")); #endif return 0; } int test_unmatched_brackets(void) { const char *js; js = "\"key 1\": 1234}"; check(parse(js, JSMN_ERROR_INVAL, 2)); js = "{\"key 1\": 1234"; check(parse(js, JSMN_ERROR_PART, 3)); js = "{\"key 1\": 1234}}"; check(parse(js, JSMN_ERROR_INVAL, 3)); js = "\"key 1\"}: 1234"; check(parse(js, JSMN_ERROR_INVAL, 3)); js = "{\"key {1\": 1234}"; check(parse(js, 3, 3, JSMN_OBJECT, 0, 16, 1, JSMN_STRING, "key {1", 1, JSMN_PRIMITIVE, "1234")); js = "{{\"key 1\": 1234}"; check(parse(js, JSMN_ERROR_PART, 4)); return 0; } int main(void) { test(test_empty, "test for empty JSON objects/arrays"); test(test_object, "test for JSON objects"); test(test_array, "test for JSON arrays"); test(test_primitive, "test primitive JSON data types"); test(test_string, "test string JSON data types"); test(test_partial_string, "test partial JSON string parsing"); test(test_partial_array, "test partial array reading"); test(test_array_nomem, "test array reading with a smaller number of tokens"); test(test_unquoted_keys, "test unquoted keys (like in JavaScript)"); test(test_input_length, "test strings that are not null-terminated"); test(test_issue_22, "test issue #22"); test(test_issue_27, "test issue #27"); test(test_count, "test token count estimation"); test(test_nonstrict, "test for non-strict mode"); test(test_unmatched_brackets, "test for unmatched brackets"); printf("\nPASSED: %d\nFAILED: %d\n", test_passed, test_failed); return (test_failed > 0); } prometheus-client-mmap-v0.23.1/vendor/c/jsmn/test/testutil.h000066400000000000000000000041261442423076500240670ustar00rootroot00000000000000#ifndef __TEST_UTIL_H__ #define __TEST_UTIL_H__ #include "../jsmn.c" static int vtokeq(const char *s, jsmntok_t *t, int numtok, va_list ap) { if (numtok > 0) { int i, start, end, size; int type; char *value; size = -1; value = NULL; for (i = 0; i < numtok; i++) { type = va_arg(ap, int); if (type == JSMN_STRING) { value = va_arg(ap, char *); size = va_arg(ap, int); start = end = -1; } else if (type == JSMN_PRIMITIVE) { value = va_arg(ap, char *); start = end = size = -1; } else { start = va_arg(ap, int); end = va_arg(ap, int); size = va_arg(ap, int); value = NULL; } if (t[i].type != type) { printf("token %d type is %d, not %d\n", i, t[i].type, type); return 0; } if (start != -1 && end != -1) { if (t[i].start != start) { printf("token %d start is %d, not %d\n", i, t[i].start, start); return 0; } if (t[i].end != end ) { printf("token %d end is %d, not %d\n", i, t[i].end, end); return 0; } } if (size != -1 && t[i].size != size) { printf("token %d size is %d, not %d\n", i, t[i].size, size); return 0; } if (s != NULL && value != NULL) { const char *p = s + t[i].start; if (strlen(value) != t[i].end - t[i].start || strncmp(p, value, t[i].end - t[i].start) != 0) { printf("token %d value is %.*s, not %s\n", i, t[i].end-t[i].start, s+t[i].start, value);
return 0; } } } } return 1; } static int tokeq(const char *s, jsmntok_t *tokens, int numtok, ...) { int ok; va_list args; va_start(args, numtok); ok = vtokeq(s, tokens, numtok, args); va_end(args); return ok; } static int parse(const char *s, int status, int numtok, ...) { int r; int ok = 1; va_list args; jsmn_parser p; jsmntok_t *t = malloc(numtok * sizeof(jsmntok_t)); jsmn_init(&p); r = jsmn_parse(&p, s, strlen(s), t, numtok); if (r != status) { printf("status is %d, not %d\n", r, status); return 0; } if (status >= 0) { va_start(args, numtok); ok = vtokeq(s, t, numtok, args); va_end(args); } free(t); return ok; } #endif /* __TEST_UTIL_H__ */
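Finally, a sketch of how an additional test case could be written against the harness above (an illustrative addition, not part of the vendored tree): parse() from testutil.h checks the parser status and the expected token stream, while check() and test() come from test.h.

#include <stdio.h>
#include "test.h"
#include "testutil.h"

int test_nested_array(void)
{
    /* three tokens: outer array [0..5), inner array [1..4), primitive "7" */
    check(parse("[[7]]", 3, 3,
                JSMN_ARRAY, 0, 5, 1,
                JSMN_ARRAY, 1, 4, 1,
                JSMN_PRIMITIVE, "7"));
    return 0;
}

int main(void)
{
    test(test_nested_array, "test nested array tokens");
    printf("\nPASSED: %d\nFAILED: %d\n", test_passed, test_failed);
    return (test_failed > 0);
}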