prometheus-client-mmap-0.10.0/0000755000004100000410000000000013606417577016237 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/README.md0000644000004100000410000001613413606417577017523 0ustar www-datawww-data# Prometheus Ruby Mmap Client This Prometheus library is fork of [Prometheus Ruby Client](https://github.com/prometheus/client_ruby) that uses mmap'ed files to share metrics from multiple processes. This allows efficient metrics processing for Ruby web apps running in multiprocess setups like Unicorn. A suite of instrumentation metric primitives for Ruby that can be exposed through a HTTP interface. Intended to be used together with a [Prometheus server][1]. [![Gem Version][4]](http://badge.fury.io/rb/prometheus-client-mmap) [![Build Status][3]](https://gitlab.com/gitlab-org/prometheus-client-mmap/commits/master) [![Dependency Status][5]](https://gemnasium.com/prometheus/prometheus-client-mmap) ## Usage ### Overview ```ruby require 'prometheus/client' # returns a default registry prometheus = Prometheus::Client.registry # create a new counter metric http_requests = Prometheus::Client::Counter.new(:http_requests, 'A counter of HTTP requests made') # register the metric prometheus.register(http_requests) # equivalent helper function http_requests = prometheus.counter(:http_requests, 'A counter of HTTP requests made') # start using the counter http_requests.increment ``` ### Rack middleware There are two [Rack][2] middlewares available, one to expose a metrics HTTP endpoint to be scraped by a prometheus server ([Exporter][9]) and one to trace all HTTP requests ([Collector][10]). It's highly recommended to enable gzip compression for the metrics endpoint, for example by including the `Rack::Deflater` middleware. ```ruby # config.ru require 'rack' require 'prometheus/client/rack/collector' require 'prometheus/client/rack/exporter' use Rack::Deflater, if: ->(env, status, headers, body) { body.any? 
&& body[0].length > 512 } use Prometheus::Client::Rack::Collector use Prometheus::Client::Rack::Exporter run ->(env) { [200, {'Content-Type' => 'text/html'}, ['OK']] } ``` Start the server and have a look at the metrics endpoint: [http://localhost:5000/metrics](http://localhost:5000/metrics). For further instructions and other scripts to get started, have a look at the integrated [example application](examples/rack/README.md). ### Pushgateway The Ruby client can also be used to push its collected metrics to a [Pushgateway][8]. This comes in handy with batch jobs or in other scenarios where it's not possible or feasible to let a Prometheus server scrape a Ruby process. ```ruby require 'prometheus/client' require 'prometheus/client/push' prometheus = Prometheus::Client.registry # ... register some metrics, set/increment/observe/etc. their values # push the registry state to the default gateway Prometheus::Client::Push.new('my-batch-job').add(prometheus) # optional: specify the instance name (instead of IP) and gateway Prometheus::Client::Push.new( 'my-job', 'instance-name', 'http://example.domain:1234').add(prometheus) # If you want to replace any previously pushed metrics for a given instance, # use the #replace method. Prometheus::Client::Push.new('my-batch-job', 'instance').replace(prometheus) # If you want to delete all previously pushed metrics for a given instance, # use the #delete method. Prometheus::Client::Push.new('my-batch-job', 'instance').delete ``` ## Metrics The following metric types are currently supported. ### Counter Counter is a metric that exposes merely a sum or tally of things. 
```ruby counter = Prometheus::Client::Counter.new(:service_requests_total, '...') # increment the counter for a given label set counter.increment({ service: 'foo' }) # increment by a given value counter.increment({ service: 'bar' }, 5) # get current value for a given label set counter.get({ service: 'bar' }) # => 5 ``` ### Gauge Gauge is a metric that exposes merely an instantaneous value or some snapshot thereof. ```ruby gauge = Prometheus::Client::Gauge.new(:room_temperature_celsius, '...') # set a value gauge.set({ room: 'kitchen' }, 21.534) # retrieve the current value for a given label set gauge.get({ room: 'kitchen' }) # => 21.534 ``` ### Histogram A histogram samples observations (usually things like request durations or response sizes) and counts them in configurable buckets. It also provides a sum of all observed values. ```ruby histogram = Prometheus::Client::Histogram.new(:service_latency_seconds, '...') # record a value histogram.observe({ service: 'users' }, Benchmark.realtime { service.call(arg) }) # retrieve the current bucket values histogram.get({ service: 'users' }) # => { 0.005 => 3, 0.01 => 15, 0.025 => 18, ..., 2.5 => 42, 5 => 42, 10 = >42 } ``` ### Summary Summary, similar to histograms, is an accumulator for samples. It captures Numeric data and provides an efficient percentile calculation mechanism. ```ruby summary = Prometheus::Client::Summary.new(:service_latency_seconds, '...') # record a value summary.observe({ service: 'database' }, Benchmark.realtime { service.call() }) # retrieve the current quantile values summary.get({ service: 'database' }) # => { 0.5 => 0.1233122, 0.9 => 3.4323, 0.99 => 5.3428231 } ``` ## Configuration ### Memory mapped files storage location Set `prometheus_multiproc_dir` environment variable to the path where you want metric files to be stored. Example: ``` prometheus_multiproc_dir=/tmp ``` ## Pitfalls ### PID cardinality In multiprocess setup e.g. 
running under Unicorn, having worker process restart often can lead to performance problems when proccesing metric files. By default each process using Prometheus metrics will create a set of files based on that process PID. With high worker churn this will lead to creation of thousands of files and in turn will cause very noticable slowdown when displaying metrics To reduce this problem, a surrogate process id can be used. Set of all such IDs needs have low cardinality, and each process id must be unique among all running process. For Unicorn a worker id/number can be used to greatly speedup the metrics rendering. To use it add this line to your `configure` block: ```ruby config.pid_provider = Prometheus::Client::Support::Unicorn.method(:worker_pid_provider) ``` ## Tools ###`bin/parse` This command can be used to parse metric files located on the filesystem just like a metric exporter would. It outputs either `json` formatted raw data or digested data in prometheus `text` format. #### Usage: ```bash $ ./bin/parse -h Usage: parse [options] files... -t, --to-prometheus-text format output using Prometheus text formatter -p, --profile enable profiling -h, --help Show this message ``` ## Development After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment. To install this gem onto your local machine, run `bundle exec rake install`. 
[1]: https://github.com/prometheus/prometheus [2]: http://rack.github.io/ [3]: https://gitlab.com/gitlab-org/prometheus-client-mmap/badges/master/pipeline.svg [4]: https://badge.fury.io/rb/prometheus-client.svg [8]: https://github.com/prometheus/pushgateway [9]: lib/prometheus/client/rack/exporter.rb [10]: lib/prometheus/client/rack/collector.rb prometheus-client-mmap-0.10.0/prometheus-client-mmap.gemspec0000644000004100000410000001164613606417577024213 0ustar www-datawww-data######################################################### # This file has been automatically generated by gem2tgz # ######################################################### # -*- encoding: utf-8 -*- # stub: prometheus-client-mmap 0.10.0 ruby lib # stub: ext/fast_mmaped_file/extconf.rb Gem::Specification.new do |s| s.name = "prometheus-client-mmap".freeze s.version = "0.10.0" s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version= s.require_paths = ["lib".freeze] s.authors = ["Tobias Schmidt".freeze, "Pawe\u{142} Chojnacki".freeze] s.date = "2020-01-08" s.email = ["ts@soundcloud.com".freeze, "pawel@gitlab.com".freeze] s.extensions = ["ext/fast_mmaped_file/extconf.rb".freeze] s.files = ["README.md".freeze, "ext/fast_mmaped_file/extconf.rb".freeze, "ext/fast_mmaped_file/fast_mmaped_file.c".freeze, "ext/fast_mmaped_file/file_format.c".freeze, "ext/fast_mmaped_file/file_format.h".freeze, "ext/fast_mmaped_file/file_parsing.c".freeze, "ext/fast_mmaped_file/file_parsing.h".freeze, "ext/fast_mmaped_file/file_reading.c".freeze, "ext/fast_mmaped_file/file_reading.h".freeze, "ext/fast_mmaped_file/globals.h".freeze, "ext/fast_mmaped_file/hashmap.c".freeze, "ext/fast_mmaped_file/jsmn.c".freeze, "ext/fast_mmaped_file/mmap.c".freeze, "ext/fast_mmaped_file/mmap.h".freeze, "ext/fast_mmaped_file/rendering.c".freeze, "ext/fast_mmaped_file/rendering.h".freeze, "ext/fast_mmaped_file/utils.c".freeze, "ext/fast_mmaped_file/utils.h".freeze, 
"ext/fast_mmaped_file/value_access.c".freeze, "ext/fast_mmaped_file/value_access.h".freeze, "lib/fast_mmaped_file.bundle".freeze, "lib/prometheus.rb".freeze, "lib/prometheus/client.rb".freeze, "lib/prometheus/client/configuration.rb".freeze, "lib/prometheus/client/counter.rb".freeze, "lib/prometheus/client/formats/text.rb".freeze, "lib/prometheus/client/gauge.rb".freeze, "lib/prometheus/client/helper/entry_parser.rb".freeze, "lib/prometheus/client/helper/file_locker.rb".freeze, "lib/prometheus/client/helper/json_parser.rb".freeze, "lib/prometheus/client/helper/metrics_processing.rb".freeze, "lib/prometheus/client/helper/metrics_representation.rb".freeze, "lib/prometheus/client/helper/mmaped_file.rb".freeze, "lib/prometheus/client/helper/plain_file.rb".freeze, "lib/prometheus/client/histogram.rb".freeze, "lib/prometheus/client/label_set_validator.rb".freeze, "lib/prometheus/client/metric.rb".freeze, "lib/prometheus/client/mmaped_dict.rb".freeze, "lib/prometheus/client/mmaped_value.rb".freeze, "lib/prometheus/client/push.rb".freeze, "lib/prometheus/client/rack/collector.rb".freeze, "lib/prometheus/client/rack/exporter.rb".freeze, "lib/prometheus/client/registry.rb".freeze, "lib/prometheus/client/simple_value.rb".freeze, "lib/prometheus/client/summary.rb".freeze, "lib/prometheus/client/support/unicorn.rb".freeze, "lib/prometheus/client/uses_value_type.rb".freeze, "lib/prometheus/client/version.rb".freeze, "vendor/c/hashmap/LICENSE".freeze, "vendor/c/hashmap/README.md".freeze, "vendor/c/hashmap/_config.yml".freeze, "vendor/c/hashmap/src/hashmap.c".freeze, "vendor/c/hashmap/src/hashmap.h".freeze, "vendor/c/hashmap/test/Makefile".freeze, "vendor/c/hashmap/test/hashmap_test.c".freeze, "vendor/c/jsmn/LICENSE".freeze, "vendor/c/jsmn/Makefile".freeze, "vendor/c/jsmn/README.md".freeze, "vendor/c/jsmn/example/jsondump.c".freeze, "vendor/c/jsmn/example/simple.c".freeze, "vendor/c/jsmn/jsmn.c".freeze, "vendor/c/jsmn/jsmn.h".freeze, "vendor/c/jsmn/library.json".freeze, 
"vendor/c/jsmn/test/test.h".freeze, "vendor/c/jsmn/test/tests.c".freeze, "vendor/c/jsmn/test/testutil.h".freeze] s.homepage = "https://gitlab.com/gitlab-org/prometheus-client-mmap".freeze s.licenses = ["Apache-2.0".freeze] s.rubygems_version = "2.5.2.1".freeze s.summary = "A suite of instrumentation metric primitivesthat can be exposed through a web services interface.".freeze if s.respond_to? :specification_version then s.specification_version = 4 if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then s.add_development_dependency(%q.freeze, [">= 1.0.4", "~> 1.0"]) s.add_development_dependency(%q.freeze, ["~> 1"]) s.add_development_dependency(%q.freeze, ["~> 0.12.2"]) s.add_development_dependency(%q.freeze, ["~> 1"]) s.add_development_dependency(%q.freeze, ["~> 0.16.2"]) else s.add_dependency(%q.freeze, [">= 1.0.4", "~> 1.0"]) s.add_dependency(%q.freeze, ["~> 1"]) s.add_dependency(%q.freeze, ["~> 0.12.2"]) s.add_dependency(%q.freeze, ["~> 1"]) s.add_dependency(%q.freeze, ["~> 0.16.2"]) end else s.add_dependency(%q.freeze, [">= 1.0.4", "~> 1.0"]) s.add_dependency(%q.freeze, ["~> 1"]) s.add_dependency(%q.freeze, ["~> 0.12.2"]) s.add_dependency(%q.freeze, ["~> 1"]) s.add_dependency(%q.freeze, ["~> 0.16.2"]) end end prometheus-client-mmap-0.10.0/lib/0000755000004100000410000000000013606417577017005 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/lib/fast_mmaped_file.bundle0000755000004100000410000012615413606417577023473 0ustar www-datawww-data (__TEXT``__text__TEXT E __stubs__TEXTtRtR__stub_helper__TEXTlTXlT__cstring__TEXTWW__const__TEXT^5^__unwind_info__TEXT^^__DATA_CONST``__got__DATA_CONST`X`T__DATApp__la_symbol_ptr__DATApp___data__DATArr__bss__DATAr`__common__DATAs8H__LINKEDIT0l,"0Ph P33Ex_ؚ)19tJ2   * 8/usr/lib/libSystem.B.dylib&Ќ)`UHAWAVAUATSHXHH}W)EHELnE1L}LeH t.f.@IH uIEL9DL9H FHt HEHL uHK J4L* t2LL t0LLHUxtL dH}1$H}H}u 1H}HX[A\A]A^A_]@UHAVSH`HHuLuL2LHtwH}HutfH=oI1EIH]H}LHH6.t-HEHEHEHH}DH}LH`[A^]H}CH}16f.fUHHcH 
H%dHcH$HdHcH<HdHcHTHcHcHlHcHcHHcHPH0H=xHCHcHPH0H=rHCHcH5nHHCH=bH5^HHOuCH=bH5"JCH=bH57HH!CCH=bH5HH$CH=bH5HH&BH=fbH5GH@'BH=GbH5GH(1BH=+bH5GH:1BH=bH5GH:BH=aH5GH5jBH=aH5}GH[8]JBH=FBHaHbHaHH=FXBHaHaH{aHH=zF)BHZaHaHTaHH=OFAH3aHaH-aHH=(FAH aHeaHaH|H=EAH`_UH)]ÐUH]v fDUH]fDUH]fDUH]fDUH]&fDUH]6fDUHHHuHUH5HU H]f.UHHHR]fUHAVSIH.Ht-IN(H.`H; u/IN H&`H; uC@8A]F8HLL[A^] AF8X@8@8I>?L[A^]v?H_H; u @8A_F8H_AF8H; tfUHSPHH?/?HH[]!?UHAWAVAUATSHIAAI@>Ht|HL}A~>HHt\ILLeEH0LELL>C/LkHMHA(HC0AC HEL MDJ HC8 Hs>1HH[A\A]A^A_]f.DUHSPHH50H9_HH1H[] UHH?]UHAVSHWH;VucIHHG(H >^H;u>HC H F^H;t.H *^H;t"H &^H;tH{0Iv0>HtHSH;I6[A^]0?[A^]fUHAWAVAUATSH8H}LFAIrxHHHMI9sH]H8HEHPH5B>} rFHUD`DiFD IL;Ev9H]H8HEHPH5BE1HM1+DH8[A\A]A^A_]DA H]@HEM@<HIA|$<IHILuEH3LDHH<ADI_HMHA(IG0AAG HEIL0IDJ0IG8H}L0HEHMDLxL;}sUIMD$DJDF8IL;EL"L;HNIH8H5AE11*Af.UHAWAVAUATSHXIIHHHHEHþHB;HEHLuH]LHI1LuLIL=W)E)EI4$IT$LHMA'HcEH~(M9~!HI$IT$)HcID$HEL$HLLIHuLEL9H H]HL:HEHAHGHH;Et7HGH8HH5@E1H1W)HGHH;Eu[DHX[A\A]A^A_]1LEL9pHGH8H5g@E1HL1)H}9H5GHH;Et9UHHH8H@HH1HQH9HB]O;f.UHHO(HY1H; u2HG H YH;HYH;H5YH; ]ÐUHAVSHH=9H;9t(HFL08H5?1L11( H[A^]fDUHAWAVATSHH HHVHIE1H19HEؾH8H9IGH8H9IG H8IG(H}9IH5?H@8IL9HHHQ8IGHLHE8I?Ht~8HH8I?7H_EH7H58?E1H1&!H*EH8H5`>E11&DHĠ[A\A^A_]HDH\7H5>HL1&HEIGI?E111c7t1I?97HDH7H5>H11=&zAof.UHAWAVSPHIHFH>Ht'M~L9{s8LR8HHHM~M~L6HHtHHL{IL6HCINH9tlHCH:H5=1H1%MHCL0.6H5(=1LL$HCL8Mv6H5=1LL10%H[A^A_]UHSPHH?Ht5HCHCHH[]ÐUHAWAVSPHIHIHHHt+HHHHHʸ fHHH9rH;H{HC05HCHtL{ Ls(HC8HC01H[A^A_]*1U11HUHAVSIHG8HtuI~tnINH~eI^HHfH;Hu HH9r>Ht9AV8HIFHIFH9vf.H;HuHH9rI~v4IF8IF0IF(IF IFIFIFI[A^]fHt Hw0HW8UH0fUHAWAVAUATSHH}HIHIHuHNHHHLnHHI9LEwML}LLMoILuLHEP MtgHMLaII!E1f.HELpLHI4Ht*H}HEP(tIHEH@HI!IM9r IH}HwHHEHXHH}P HIHMLyII!1f.DHELpMIK4&Ht*H}HEP(tIHEH@HI!HL9r1Mt,I>t IFHu8.HEH@0HtH}IHu 1HEIHEH@HEIFH[A\A]A^A_].)/fUHAWAVAUATSH8HHIHFH=HL1HHSHKHMLcHCHUHHUHHMHHEHUHyHMH9LsIH}S LuMLsII!E1f.LcMIK4,Ht,H}S(t!IHCHI!IL;}r?fMt1HMA$HUHH9ZH}01%HEHCHEHCH}0H8[A\A]A^A_]-.UHAWAVAUATSHHIHIH_HLAU H]HtgLuMeII!E1f.DMuLHI4Ht3H}AU(tIIEHI!IL;}rLHtID1H[A\A]A^A_]--f.@UHAWAVAUATSHHIHIH_HLAU 
H]HtoLuMeII!E1f.DMuLHI4Ht;H}AU(tIIEHI!IL;}rLHtI\L#1HH[A\A]A^A_]-3-UHAWAVAUATSHHIHH+GIHG8HtH;IL$ID$HH]IHLuM~I!AMt$LHI<HtVAT$ IL$HQH!H+Et HHHt&IIINHUHJHIL$HQL}LuII!II9rHEH@HH[A\A]A^A_]UHAVSHIHG8HtuI~tnINH~eI^HHfH;Hu HH9r>Ht9AV8HIFHIFH9vf.H;HuHH9rIFIvI~H@-[A^]+f.fUHAVSHIHG8HtuI~tnINH~eI^HHfH;Hu HH9r>Ht9AV8HIFHIFH9vf.H;HuHH9rIFIvI~H,I6I9vtI~H.Ht IFIIF[A^]+fDHtHGUH+f.Ht-Ht#HOH~HGHHH8u HH9r1UH+DHt=HHt2HHOHHOH9vf.@H8u HH9r1UH*DUHAVSHt}HHtkIH;t4LHIFHIFH9vFH;u;HH9r.HIFHIFH9vf.H;u HH9r1H[A^]*fUHHtH]1]f.@UHHtHG]1]f.UHHtHw]ÐUHAWAVAUATSPHUHt|IHtyI1H~RMuIHt.M}IvHHUAԅx/u+I9t I DM;}u&IIEHIEI9r1H[A\A]A^A_]ø))UHt1H1HHHH HHHH1HDŽuH1HH H1HHH]f.UH]r+fDUH]h+fDUHAWAVAUATSD7DoI9HAHEI&L=EDDmEf.B6CTw+IcLEAD7EI9rcf.f{}WHtDM1}EAMAA)McIL}DDAt A?ID[DyEcDMυEIcHXHHEDxt 8AHHAHE9HUHLEMEEEUDWIIJDBD MN\HcGHtHD 1{)AE3DoEDUEMEIHHUxDEtPf.AÃ:wIr2]t-}t( ABH9suAHE9DADoHDtDTD ADAHcGHtHD EEEDDmL]EVDI9ILEf.<\t)<"HTA[H93ރSL%zIcLA[H9XЀ r!<%H??HA[H9XЀ r!<%H??HvA[H9swtoXЀ r!<%QH??H:A[H9s;t3XЀ r!<%H??HADDˉDÉAH9H HcGHHȃDUAIcHH]Cȃw{t ;HAyACGEDD]D+CADAEEpE9W%DcGAFAMυDgEEDDmI&DM HtcH]D9É_HDTD\D AHcGHtHD EAډ؉]I&IAEEI&L]DWEEEHt(Ax#IcHHDxt8tHAyDD7EHuD7[A\A]A^A_]D7f""""p {VUHHG]ÐUHAWAVSPHIAH5@HuH=)h!HH@H11.!HHDL_!HH[A^A_]f.UHAVSH 1 IHX P!HCH@HH@@H@8H@0H@(H@ H@H@H@HHC@@L[A^]f.UHSPHHGHxHtEH8Hp(HKHyHu;Ht'Hq0H;q(syt!HKHyHtZWHH[]IHt Ht9H1H,HDH5(H(H߹1 HH,H8H5(1f.UHSPHg f.HHH9rHH[]f.UH]fDUHAWAVAUATSHIHHEH]HH}HHF~-HEШu%HHtHHt%HH߾17XAH]H8ULuL Mo MIE޿AHHH9rD+1H޺EE1HHHIMDa@HHY(MtHY0AHHAH}Ht>IMHAHAuL[A\A^A_]H'H8H5%$1H=%$1H 'H9H5$1MDUHSPH H[ HCHxHtqx@xkH8HOHv^Hp(JHKHyHuaHt/Hq0H;q(sytHCHxHHKHAHH[]H'H8H5A#1HtHtHCH@HH&H:DH5"#HA#H߹d1RH&H8H5I#1:fUHAWAVAUATSH8IHe&HHEL?DoL fMg ID$HxHx@HHHAt@H&H8HLHt/كHu%Ht Hw r H HID$HHCID$H@(HC ID$H@0HCAu\ID$@t HHAFuyINIvAVHHEHEHEHEHHMHL{ uHHtHHtH @HID$@wH]IFHEIcFHEIFHEH=7HPHuHHHEHtPM6L IF H@HxHtUx@xOHHHvCH uHKHH0HEH K$H H;MuH8[A\A]A^A_]HG$H8H5r 1H=r \HC$H8H5 1fUHSPH HC H@HxHtx@xHHHv H[]H#H8H51PUHHHwWHOH]DUHSPHHH[]ÐUHAWAVAUATSH 
H(H.#HHEиHVIIE11H@H0H(N4HL8t)Mt$HSI;Vu#H{Iv!u@L9LH5X!sIvIVLcLH5D!OLH5F!;IvIVL+LH51I~(LH LH5 LH HHI6LHIVHHPA:AǃKA^HctHx9A_HLHI6)HcLYdLL8ML@LH5Iw0L LH5f.DA\LH5mCHHHcH@HLE1IcGHA9HHHH2)HcLnHB0LH5OLH5;HcHLcTMxXDXE9~KE)HHHAr#PuJ4#H=ltLIcH0HL0LH5SL;}LH58IIL;LHLL@L8t4LH5NIw0L<LH5.LH5AG8LPLHLHcLLIM9H H H H;Mt9H 2H8HHHH511H H H;MulH [A\A]A^A_]H1H8HHHH5vH1H8HHHH5H1H8HHHH5u{UHAWAVSHHIt,)@)P)`)p)e)m)u)}L8L0H(H HHHEHHEHEHEH0HEHuHIHH50HuH=HHq0HL~H5g0HuH=!HHG0HLLH-HH;EuH[A^A_]UHAWAVSHHIt,)@)P)`)p)e)m)u)}L8L0H(H HHHEHHEHEHEH0HEHuHIwHH5a/HuH=HHA/HLNH57/HuH=!HH/HLHHH;Eu1H[A^A_]ef.DUHAWAVSHHIt,)@)P)`)p)e)m)u)}L8L0H(H HgHHEHHEHEHEH0HEHuHQI 8lH5LH1HH5.HuH=rsHH-HLH5-HuH=Z!AHH-HLHHH;Eu1H[A^A_] f.DUHAVSHlHH5F-HuH= HH&-H@IH5-HuH=! HH,HHEIuH#H8H51 H} H5LH1u UHHGHɸE]UHHGH0]ÐUHAWAVAUATSPIIIHHu Lh H߾ [ L{ IGHxHx@x~HHHvr@LL HuLLLLHHǾ Hp IGHH0HH9v?HH[A\A]A^A_] HH8H51H H= HH8H5R1$ f.fUHAWAVAUATSH(HG@OIHL2A HuHMu AAMwI9%DAEHCHHp( ADEGd. L9r+HCHp(L9sHHu1=CL.MDL}I uIWUHL,LtH}l ULH8 }Nt/QL " }:AH} EHEKD5HCHD HcEHTH}Lw H([A\A]A^A_]H=N8 HH8H51 fUHAWAVAUATSPIIIHH L H߾ L{ IGHxH#x@HHH @LL Hu[LLLLHHǾ2 H IGHH0HH9HH[A\A]A^A_]: HHǾH^ AIGHH0HL9@uuL(L) EHEKD%H߾H IGHH0HH9v@HH[A\A]A^A_]HbH8H51H=wHFH8H51H.H8H5L1fUHSPH HC H@HxHt1x@x+HHQHv@u0EHD H[]HH8H51=H=fUHAVSIHHrH߾ eH[ HCHxHtIx@xCHHHv7@uIHx(wHNt=LHKH L[A^]H H8H551H=51VfDUHAWAVSPIHOHq(L9v"HH8H51H[A^A_]tHH9HtDlHKt7HHvH8H5E11zHCx@yD"HHA0HA(HCx@y HxHp1eHCx@M~L1HKy@Ht!H5-HsHu:Lv(oDHC@@HH8H5L1u<~@ HC@@H H9HPHH51HsV NDF@LN81LHt HKHLq(Lq0AAu<HH8H5E11HKAAH9LusHH4H5H1H[A^A_]HH8HCHHHH5H1E1y@HC@@DH[A^A_]ÐUHH=u H5{ H f.UHH=E H5K H af.UHH= H5 H i 1f.UHH=f H5 H  :f.UHH=R H5 H Kf.UHH=" H5 H Lf.UHH= H5[ H qf.UHH=z H5+ H Af.UHH= H5 H  {f.UHH= H5 H ` |f.UHH=\ H5 H f.UHH=, H5k H f.UHH= H5; H _ Qf.UHH= H5 H / !f.UHH= H5H f.UHH= H5H f.UHH= H5{H f.UHH=c H5KH oaf.UHH=G H5H ?11f.UHH= H5H  2%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % %%%%%%%%%% %"%$%&%(%*%,hhhhhhhh6hNhghh|hrhhh^hThJh#@hH6h],ht"hhhhhhhhh/h@hVhhh~hhhhxhnhdhZh7PhSFhe<h2h(hhh hL9AS% 
hhh#h2hJhXhehshhhhxhnhdhZhPhFhhKhY hhhuhhh,h;hLh[hkh{hgaugeminmaxlivesumpidsamplesPrometheusParsingErrorFastMmapedFileMAP_SHAREDto_metricsnewinitializeslicesyncmunmapusedused=fetch_entryupsert_entrysource file %s corrupted, used %u > file size %usource file %s corrupted, used %u < stored data length %uFailed creating metrics entryCouldn't allocate for %zu memoryProcessed entries %zu != map entries %zuCan't fclose file, errno: %dwrong number of arguments %lu instead of 4Can't malloc %zu, errno: %dCan't realloc %zu, errno: %dCouldn't read whole file, read %zu, instead of %zurCan't open %s, errno: %dCan't stat file, errno: %dCan't fseek %zu, errno: %dhashmap_init../../../../ext/fast_mmaped_file/hashmap.cmap != ((void*)0)hash_func != ((void*)0)key_compare_func != ((void*)0)hashmap_set_key_alloc_funcshashmap_putkey != ((void*)0)hashmap_gethashmap_removehashmap_clearhashmap_resethashmap_sizehashmap_iterhashmap_iter_nexthashmap_iter_removehashmap_foreachfunc != ((void*)0)hashmap_rehashnew_size >= (1 << 5)(new_size & (new_size - 1)) == 0allocateInsecure operationCan't open %sCan't stat %sCan't reserve %zu bytes for memory-mapped filemmap failed (%d)[]unmapped filemmapmsync(%d)munmap failed at %s:%d with errno: %d../../../../ext/fast_mmaped_file/mmap.ctruncateInsecure: can't modify mmap %.*g too many labels or malformed json: %smalformed json: %smismatched number of labels: %sparsing failed: %s{pid=""}{=",,pid="} (%s)prometheus_last_exceptionprometheus_last_exception_message%sno exception found in thread localoffset %zu out of stringstring length gt %dCan't reduce the size of mmapmlock(%d)munmap failed%s: Can't open %s../../../../ext/fast_mmaped_file/value_access.cCan't lseek %zuCan't extend %smmap failed# HELP Multiprocess metric # TYPE null44!Xa  LLtRL /0P  P$%% &@'P')+,0////48p990>>`?ApVzVVVVVVVVVVVVVVWWW$W.W8WBWLWVW`WlTvTTTTTTTTTTTTTTU UU 
U*U4U>UHURU\UfUpUzUUUUUUUUUUUUUUVVV$V.V8VjWBVLVVVtW~WWWWWWW"`T>@_rb_cObjectQq@_rb_cString@_rb_eArgError@_rb_eIOError@_rb_eIndexError@_rb_eNoMemError@_rb_eRuntimeError@_rb_eSecurityError@_rb_eTypeError@___stack_chk_guard@dyld_stub_binderHr@___assert_rtnr@___bzeror@___errorr@___stack_chk_failr @_callocr(@_closer0@_fcloser8@_filenor@@_fopen$DARWIN_EXTSNrH@_freadrP@_freerX@_fseekr`@_fstat$INODE64rh@_ftruncaterp@_lseekrx@_mallocr@_memcpyr@_memmover@_memsetr@_mlockr@_mmapr@_msyncr@_munmapr@_openr@_qsortr>@_rb_ary_detransientr>@_rb_ary_entryr>@_rb_check_safe_objr>@_rb_check_typer>@_rb_data_object_zallocr>@_rb_define_alloc_funcr>@_rb_define_classr>@_rb_define_constr>@_rb_define_methodr>@_rb_define_singleton_methodr>@_rb_ensurer>@_rb_error_arityr>@_rb_error_frozenr>@_rb_fix2intr>@_rb_float_newr>@_rb_funcallvr>@_rb_gc_force_recycler>@_rb_gc_writebarrier_unprotectr>@_rb_hash_asetr>@_rb_hash_lookupr>@_rb_id2namer>@_rb_intern2r>@_rb_num2dblr>@_rb_num2intr>@_rb_num2uintr>@_rb_obj_allocr>@_rb_obj_call_initr>@_rb_obj_freezer>@_rb_obj_taintedr>@_rb_raiser>@_rb_safe_levelr>@_rb_securer>@_rb_str_appendr>@_rb_str_catr>@_rb_str_cat_cstrr>@_rb_str_catfr>@_rb_str_equalr>@_rb_str_new_staticr>@_rb_str_to_strr>@_rb_string_valuer>@_rb_string_value_cstrr>@_rb_string_value_ptrr>@_rb_sym2idr>@_rb_thread_currentr>@_rb_thread_local_arefr>@_rb_thread_local_asetr>@_rb_vsprintfr@_reallocr>@_ruby_snprintfr>@_ruby_strdupr>@_ruby_xmalloc2r@_strcmpr@_strdupr@_strerrorr@_strlenr@_strncmpr@_sysconfr@_truncater@_write_aggregate_filesmInit_fast_mmaped_filepentrhashmap_sis_pid_significantfile_rbuffer_disposejsmn_next_page_boundarywith_exception load_used MMAPED_FILE em_thod_rge_or_storeto_metricsfetch_entry upsert_entry load_used save_used adding_lengthro$y_ies_to_string hashmap_newputgetremoveiter_foreach$$$get_set_datakeydata$$$%%' sidestroyputgetrecforeachhash_stringalloc_key_stringetizeup_key_alloc_funcs(cess_bufferm_eParsingError *ort_map_entriesave_ ym_ .2closeopen_from_params23eaise_last_exception 
ad_from_fileserve_mmap_file_bytes79nitter9;==DmovesetElearompare_stringIJLL_nextremoveget_set_dataMMkeydataNOOOPQQparseinitQcs_initaref_mmsync unmap newalloccdghhmopyexception used _errno Ќgauge m livesum pid samples in ax  0 @0P`@  @P ` p@`  0000000000000000000,/  p     . #D 3M 9X <d p<u < G @L N N  O PO O O& O= PT @Ph pP| P P Q 0Q `Q Q Q Q6  RN PRf ^ ^ ^ ^  r  r  r6  rb  r  r  r  r r" rK rv s s r& 7G<Zq0`Pp @`1(K$Z(r'"`(P&'p'&&''>K"[P%j`0&1(PFFKK"5@IJ6U04^7h`2t1~p830 @sE 4ApF*< sG (sT s] sf 0so 8s|B0D!'.=HOW_hpw} "3DVr '4Igu %4@Q^l ! * 9 F U ] e o w     xyz{}~|xyz{}~ _Init_fast_mmaped_file_MMAPED_FILE_aggregate_files_buffer_dispose_entries_to_string_entry_hashmap_foreach_entry_hashmap_get_entry_hashmap_iter_get_data_entry_hashmap_iter_get_key_entry_hashmap_iter_set_data_entry_hashmap_put_entry_hashmap_remove_entry_new_file_close_file_open_from_params_hashmap_alloc_key_string_hashmap_clear_hashmap_compare_string_hashmap_destroy_hashmap_foreach_hashmap_get_hashmap_hash_string_hashmap_init_hashmap_iter_hashmap_iter_get_data_hashmap_iter_get_key_hashmap_iter_next_hashmap_iter_remove_hashmap_iter_set_data_hashmap_put_hashmap_remove_hashmap_reset_hashmap_set_key_alloc_funcs_hashmap_setup_hashmap_size_is_pid_significant_jsmn_init_jsmn_parse_load_used_merge_or_store_method_fetch_entry_method_load_used_method_save_used_method_to_metrics_method_upsert_entry_mm_aref_m_mm_init_mm_msync_mm_s_alloc_mm_s_new_mm_unmap_next_page_boundary_padding_length_process_buffer_prom_eParsingError_raise_last_exception_read_from_file_reserve_mmap_file_bytes_save_exception_save_used_sort_map_entries_sym_gauge_sym_livesum_sym_max_sym_min_sym_pid_sym_samples_with_exception_with_exception_errno___assert_rtn___bzero___error___stack_chk_fail___stack_chk_guard_calloc_close_fclose_fileno_fopen$DARWIN_EXTSN_fread_free_fseek_fstat$INODE64_ftruncate_lseek_malloc_memcpy_memmove_memset_mlock_mmap_msync_munmap_open_qsort_rb_ary_detransient_rb_ary_entry_rb_cObject_rb_cString_rb_check_safe_obj_rb_check_type_r
b_data_object_zalloc_rb_define_alloc_func_rb_define_class_rb_define_const_rb_define_method_rb_define_singleton_method_rb_eArgError_rb_eIOError_rb_eIndexError_rb_eNoMemError_rb_eRuntimeError_rb_eSecurityError_rb_eTypeError_rb_ensure_rb_error_arity_rb_error_frozen_rb_fix2int_rb_float_new_rb_funcallv_rb_gc_force_recycle_rb_gc_writebarrier_unprotect_rb_hash_aset_rb_hash_lookup_rb_id2name_rb_intern2_rb_num2dbl_rb_num2int_rb_num2uint_rb_obj_alloc_rb_obj_call_init_rb_obj_freeze_rb_obj_tainted_rb_raise_rb_safe_level_rb_secure_rb_str_append_rb_str_cat_rb_str_cat_cstr_rb_str_catf_rb_str_equal_rb_str_new_static_rb_str_to_str_rb_string_value_rb_string_value_cstr_rb_string_value_ptr_rb_sym2id_rb_thread_current_rb_thread_local_aref_rb_thread_local_aset_rb_vsprintf_realloc_ruby_snprintf_ruby_strdup_ruby_xmalloc2_strcmp_strdup_strerror_strlen_strncmp_sysconf_truncate_writedyld_stub_binder___entry_hashmap_foreach_callback_entry_free_hashmap_hash_entry_hashmap_compare_entry_entry_lexical_comparator_hashmap_rehash_hashmap_entry_remove_mm_free_mm_i_bang_mm_vunlock_mm_protect_bang_mm_recycle_initialize_entry_expand_hashmap_init.cold.1_hashmap_init.cold.2_hashmap_init.cold.3_hashmap_set_key_alloc_funcs.cold.1_hashmap_put.cold.1_hashmap_put.cold.2_hashmap_rehash.cold.1_hashmap_rehash.cold.2_hashmap_get.cold.1_hashmap_get.cold.2_hashmap_remove.cold.1_hashmap_remove.cold.2_hashmap_clear.cold.1_hashmap_reset.cold.1_hashmap_size.cold.1_hashmap_iter.cold.1_hashmap_iter_next.cold.1_hashmap_iter_remove.cold.1_hashmap_foreach.cold.1_hashmap_foreach.cold.2_append_entry_head.help_beg_append_entry_head.help_fin_append_entry_head.type_beg_valid_not_null.null_s__dyld_private_Init_fast_mmaped_file.rb_intern_id_cache_Init_fast_mmaped_file.rb_intern_id_cache.3_Init_fast_mmaped_file.rb_intern_id_cache.5_Init_fast_mmaped_file.rb_intern_id_cache.7_Init_fast_mmaped_file.rb_intern_id_cache.9_Init_fast_mmaped_file.rb_intern_id_cache.11_mm_s_new.rb_intern_id_cache_mm_aref_m.rb_intern_id_cache_raise_last_excepti
on.rb_intern_id_cache_raise_last_exception.rb_intern_id_cache.2_rb_save_exception.rb_intern_id_cache_rb_save_exception.rb_intern_id_cache.6prometheus-client-mmap-0.10.0/lib/prometheus/0000755000004100000410000000000013606417577021200 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/lib/prometheus/client/0000755000004100000410000000000013606417577022456 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/lib/prometheus/client/simple_value.rb0000644000004100000410000000061313606417577025470 0ustar www-datawww-datarequire 'json' module Prometheus module Client class SimpleValue def initialize(_type, _metric_name, _name, _labels, *_args) @value = 0.0 end def set(value) @value = value end def increment(by = 1) @value += by end def get @value end def self.multiprocess false end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/version.rb0000644000004100000410000000011213606417577024462 0ustar www-datawww-datamodule Prometheus module Client VERSION = '0.10.0'.freeze end end prometheus-client-mmap-0.10.0/lib/prometheus/client/mmaped_dict.rb0000644000004100000410000000375613606417577025264 0ustar www-datawww-datarequire 'prometheus/client/helper/mmaped_file' require 'prometheus/client/helper/plain_file' require 'prometheus/client' module Prometheus module Client class ParsingError < StandardError end # A dict of doubles, backed by an mmapped file. # # The file starts with a 4 byte int, indicating how much of it is used. # Then 4 bytes of padding. # There's then a number of entries, consisting of a 4 byte int which is the # size of the next field, a utf-8 encoded string key, padding to an 8 byte # alignment, and then a 8 byte float which is the value. 
class MmapedDict attr_reader :m, :used, :positions def self.read_all_values(f) Helper::PlainFile.new(f).entries.map do |data, encoded_len, value_offset, _| encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset)) [encoded, value] end end def initialize(m) @mutex = Mutex.new @m = m # @m.mlock # TODO: Ensure memory is locked to RAM @positions = {} read_all_positions.each do |key, pos| @positions[key] = pos end rescue StandardError => e raise ParsingError, "exception #{e} while processing metrics file #{path}" end def read_value(key) @m.fetch_entry(@positions, key, 0.0) end def write_value(key, value) @m.upsert_entry(@positions, key, value) end def path @m.filepath if @m end def close @m.sync @m.close rescue TypeError => e Prometheus::Client.logger.warn("munmap raised error #{e}") end private def init_value(key) @m.add_entry(@positions, key, 0.0) end # Yield (key, pos). No locking is performed. def read_all_positions @m.entries.map do |data, encoded_len, _, absolute_pos| encoded, = data.unpack(format('@4A%d', encoded_len)) [encoded, absolute_pos] end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/uses_value_type.rb0000644000004100000410000000133313606417577026217 0ustar www-datawww-datarequire 'prometheus/client/simple_value' module Prometheus module Client # Module providing convenience methods for creating value_object module UsesValueType def value_class Prometheus::Client.configuration.value_class end def value_object(type, metric_name, name, labels, *args) value_class.new(type, metric_name, name, labels, *args) rescue StandardError => e Prometheus::Client.logger.info("error #{e} while creating instance of #{value_class} defaulting to SimpleValue") Prometheus::Client.logger.debug("error #{e} backtrace #{e.backtrace.join("\n")}") Prometheus::Client::SimpleValue.new(type, metric_name, name, labels) end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/push.rb0000644000004100000410000000346013606417577023765 
0ustar www-datawww-data# encoding: UTF-8 require 'net/http' require 'uri' require 'prometheus/client' require 'prometheus/client/formats/text' module Prometheus # Client is a ruby implementation for a Prometheus compatible client. module Client # Push implements a simple way to transmit a given registry to a given # Pushgateway. class Push DEFAULT_GATEWAY = 'http://localhost:9091'.freeze PATH = '/metrics/jobs/%s'.freeze INSTANCE_PATH = '/metrics/jobs/%s/instances/%s'.freeze HEADER = { 'Content-Type' => Formats::Text::CONTENT_TYPE }.freeze attr_reader :job, :instance, :gateway, :path def initialize(job, instance = nil, gateway = nil) @job = job @instance = instance @gateway = gateway || DEFAULT_GATEWAY @uri = parse(@gateway) @path = build_path(job, instance) @http = Net::HTTP.new(@uri.host, @uri.port) end def add(registry) request('POST', registry) end def replace(registry) request('PUT', registry) end def delete @http.send_request('DELETE', path) end private def parse(url) uri = URI.parse(url) if uri.scheme == 'http' uri else raise ArgumentError, 'only HTTP gateway URLs are supported currently.' 
end rescue URI::InvalidURIError => e raise ArgumentError, "#{url} is not a valid URL: #{e}" end def build_path(job, instance) if instance format(INSTANCE_PATH, URI.escape(job), URI.escape(instance)) else format(PATH, URI.escape(job)) end end def request(method, registry) data = Formats::Text.marshal(registry) @http.send_request(method, path, data, HEADER) end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/metric.rb0000644000004100000410000000376513606417577024301 0ustar www-datawww-datarequire 'thread' require 'prometheus/client/label_set_validator' require 'prometheus/client/uses_value_type' module Prometheus module Client class Metric include UsesValueType attr_reader :name, :docstring, :base_labels def initialize(name, docstring, base_labels = {}) @mutex = Mutex.new @validator = case type when :summary LabelSetValidator.new(['quantile']) when :histogram LabelSetValidator.new(['le']) else LabelSetValidator.new end @values = Hash.new { |hash, key| hash[key] = default(key) } validate_name(name) validate_docstring(docstring) @validator.valid?(base_labels) @name = name @docstring = docstring @base_labels = base_labels end # Returns the value for the given label set def get(labels = {}) label_set = label_set_for(labels) @validator.valid?(label_set) @values[label_set].get end # Returns all label sets with their values def values synchronize do @values.each_with_object({}) do |(labels, value), memo| memo[labels] = value end end end private def touch_default_value @values[label_set_for({})] end def default(labels) value_object(type, @name, @name, labels) end def validate_name(name) return true if name.is_a?(Symbol) raise ArgumentError, 'given name must be a symbol' end def validate_docstring(docstring) return true if docstring.respond_to?(:empty?) && !docstring.empty? 
raise ArgumentError, 'docstring must be given' end def label_set_for(labels) @validator.validate(@base_labels.merge(labels)) end def synchronize(&block) @mutex.synchronize(&block) end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/rack/0000755000004100000410000000000013606417577023376 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/lib/prometheus/client/rack/exporter.rb0000644000004100000410000000505513606417577025600 0ustar www-datawww-data# encoding: UTF-8 require 'prometheus/client' require 'prometheus/client/formats/text' module Prometheus module Client module Rack # Exporter is a Rack middleware that provides a sample implementation of # a Prometheus HTTP client API. class Exporter attr_reader :app, :registry, :path FORMATS = [Formats::Text].freeze FALLBACK = Formats::Text def initialize(app, options = {}) @app = app @registry = options[:registry] || Client.registry @path = options[:path] || '/metrics' @acceptable = build_dictionary(FORMATS, FALLBACK) end def call(env) if env['PATH_INFO'] == @path format = negotiate(env['HTTP_ACCEPT'], @acceptable) format ? respond_with(format) : not_acceptable(FORMATS) else @app.call(env) end end private def negotiate(accept, formats) accept = '*/*' if accept.to_s.empty? 
parse(accept).each do |content_type, _| return formats[content_type] if formats.key?(content_type) end nil end def parse(header) header.to_s.split(/\s*,\s*/).map do |type| attributes = type.split(/\s*;\s*/) quality = extract_quality(attributes) [attributes.join('; '), quality] end.sort_by(&:last).reverse end def extract_quality(attributes, default = 1.0) quality = default attributes.delete_if do |attr| quality = attr.split('q=').last.to_f if attr.start_with?('q=') end quality end def respond_with(format) response = if Prometheus::Client.configuration.value_class.multiprocess format.marshal_multiprocess else format.marshal end [ 200, { 'Content-Type' => format::CONTENT_TYPE }, [response], ] end def not_acceptable(formats) types = formats.map { |format| format::MEDIA_TYPE } [ 406, { 'Content-Type' => 'text/plain' }, ["Supported media types: #{types.join(', ')}"], ] end def build_dictionary(formats, fallback) formats.each_with_object('*/*' => fallback) do |format, memo| memo[format::CONTENT_TYPE] = format memo[format::MEDIA_TYPE] = format end end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/rack/collector.rb0000644000004100000410000000470313606417577025715 0ustar www-datawww-data# encoding: UTF-8 require 'prometheus/client' module Prometheus module Client module Rack # Collector is a Rack middleware that provides a sample implementation of # a HTTP tracer. The default label builder can be modified to export a # different set of labels per recorded metric. 
class Collector attr_reader :app, :registry def initialize(app, options = {}, &label_builder) @app = app @registry = options[:registry] || Client.registry @label_builder = label_builder || DEFAULT_LABEL_BUILDER init_request_metrics init_exception_metrics end def call(env) # :nodoc: trace(env) { @app.call(env) } end protected DEFAULT_LABEL_BUILDER = proc do |env| { method: env['REQUEST_METHOD'].downcase, host: env['HTTP_HOST'].to_s, path: env['PATH_INFO'].to_s, } end def init_request_metrics @requests = @registry.counter( :http_requests_total, 'A counter of the total number of HTTP requests made.', ) @durations = @registry.summary( :http_request_duration_seconds, 'A summary of the response latency.', ) @durations_hist = @registry.histogram( :http_req_duration_seconds, 'A histogram of the response latency.', ) end def init_exception_metrics @exceptions = @registry.counter( :http_exceptions_total, 'A counter of the total number of exceptions raised.', ) end def trace(env) start = Time.now yield.tap do |response| duration = (Time.now - start).to_f record(labels(env, response), duration) end rescue => exception @exceptions.increment(exception: exception.class.name) raise end def labels(env, response) @label_builder.call(env).tap do |labels| labels[:code] = response.first.to_s end end def record(labels, duration) @requests.increment(labels) @durations.observe(labels, duration) @durations_hist.observe(labels, duration) rescue => exception @exceptions.increment(exception: exception.class.name) raise nil end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/summary.rb0000644000004100000410000000322413606417577024501 0ustar www-datawww-datarequire 'prometheus/client/metric' require 'prometheus/client/uses_value_type' module Prometheus module Client # Summary is an accumulator for samples. It captures Numeric data and # provides an efficient quantile calculation mechanism. 
class Summary < Metric extend Gem::Deprecate # Value represents the state of a Summary at a given point. class Value < Hash include UsesValueType attr_accessor :sum, :total def initialize(type, name, labels) @sum = value_object(type, name, "#{name}_sum", labels) @total = value_object(type, name, "#{name}_count", labels) end def observe(value) @sum.increment(value) @total.increment end end def initialize(name, docstring, base_labels = {}) super(name, docstring, base_labels) end def type :summary end # Records a given value. def observe(labels, value) label_set = label_set_for(labels) synchronize { @values[label_set].observe(value) } end alias add observe deprecate :add, :observe, 2016, 10 # Returns the value for the given label set def get(labels = {}) @validator.valid?(labels) synchronize do @values[labels].sum.get end end # Returns all label sets with their values def values synchronize do @values.each_with_object({}) do |(labels, value), memo| memo[labels] = value.sum end end end private def default(labels) Value.new(type, @name, labels) end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/configuration.rb0000644000004100000410000000120013606417577025643 0ustar www-datawww-datarequire 'prometheus/client/registry' require 'prometheus/client/mmaped_value' require 'logger' require 'tmpdir' module Prometheus module Client class Configuration attr_accessor :value_class, :multiprocess_files_dir, :initial_mmap_file_size, :logger, :pid_provider def initialize @value_class = ::Prometheus::Client::MmapedValue @initial_mmap_file_size = 4 * 1024 @logger = Logger.new($stdout) @pid_provider = Process.method(:pid) @multiprocess_files_dir = ENV.fetch('prometheus_multiproc_dir') do Dir.mktmpdir("prometheus-mmap") end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/formats/0000755000004100000410000000000013606417577024131 5ustar 
www-datawww-dataprometheus-client-mmap-0.10.0/lib/prometheus/client/formats/text.rb0000644000004100000410000000522613606417577025447 0ustar www-datawww-datarequire 'prometheus/client/uses_value_type' require 'prometheus/client/helper/json_parser' require 'prometheus/client/helper/plain_file' require 'prometheus/client/helper/metrics_processing' require 'prometheus/client/helper/metrics_representation' module Prometheus module Client module Formats # Text format is human readable mainly used for manual inspection. module Text MEDIA_TYPE = 'text/plain'.freeze VERSION = '0.0.4'.freeze CONTENT_TYPE = "#{MEDIA_TYPE}; version=#{VERSION}".freeze class << self def marshal(registry) metrics = registry.metrics.map do |metric| samples = metric.values.flat_map do |label_set, value| representation(metric, label_set, value) end [metric.name, { type: metric.type, help: metric.docstring, samples: samples }] end Helper::MetricsRepresentation.to_text(metrics) end def marshal_multiprocess(path = Prometheus::Client.configuration.multiprocess_files_dir) file_list = Dir.glob(File.join(path, '*.db')).sort .map {|f| Helper::PlainFile.new(f) } .map {|f| [f.filepath, f.multiprocess_mode.to_sym, f.type.to_sym, f.pid] } FastMmapedFile.to_metrics(file_list.to_a) end private def load_metrics(path) metrics = {} Dir.glob(File.join(path, '*.db')).sort.each do |f| Helper::PlainFile.new(f).to_metrics(metrics) end metrics end def representation(metric, label_set, value) labels = metric.base_labels.merge(label_set) if metric.type == :summary summary(metric.name, labels, value) elsif metric.type == :histogram histogram(metric.name, labels, value) else [[metric.name, labels, value.get]] end end def summary(name, set, value) rv = value.get.map do |q, v| [name, set.merge(quantile: q), v] end rv << ["#{name}_sum", set, value.get.sum] rv << ["#{name}_count", set, value.get.total] rv end def histogram(name, set, value) # |metric_name, labels, value| rv = value.get.map do |q, v| [name, set.merge(le: q), v] 
end rv << [name, set.merge(le: '+Inf'), value.get.total] rv << ["#{name}_sum", set, value.get.sum] rv << ["#{name}_count", set, value.get.total] rv end end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/gauge.rb0000644000004100000410000000175113606417577024077 0ustar www-datawww-data# encoding: UTF-8 require 'prometheus/client/metric' module Prometheus module Client # A Gauge is a metric that exposes merely an instantaneous value or some # snapshot thereof. class Gauge < Metric def initialize(name, docstring, base_labels = {}, multiprocess_mode=:all) super(name, docstring, base_labels) if value_class.multiprocess and ![:min, :max, :livesum, :liveall, :all].include?(multiprocess_mode) raise ArgumentError, 'Invalid multiprocess mode: ' + multiprocess_mode end @multiprocess_mode = multiprocess_mode end def type :gauge end def default(labels) value_object(type, @name, @name, labels, @multiprocess_mode) end # Sets the value for the given label set def set(labels, value) @values[label_set_for(labels)].set(value) end def increment(labels, value) @values[label_set_for(labels)].increment(value) end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/label_set_validator.rb0000644000004100000410000000440113606417577027001 0ustar www-datawww-data# encoding: UTF-8 module Prometheus module Client # LabelSetValidator ensures that all used label sets comply with the # Prometheus specification. class LabelSetValidator # TODO: we might allow setting :instance in the future RESERVED_LABELS = [:job, :instance].freeze class LabelSetError < StandardError; end class InvalidLabelSetError < LabelSetError; end class InvalidLabelError < LabelSetError; end class ReservedLabelError < LabelSetError; end def initialize(reserved_labels = []) @reserved_labels = (reserved_labels + RESERVED_LABELS).freeze @validated = {} end def valid?(labels) unless labels.is_a?(Hash) raise InvalidLabelSetError, "#{labels} is not a valid label set" end labels.all? 
do |key, value| validate_symbol(key) validate_name(key) validate_reserved_key(key) validate_value(key, value) end end def validate(labels) return labels if @validated.key?(labels.hash) valid?(labels) unless @validated.empty? || match?(labels, @validated.first.last) raise InvalidLabelSetError, 'labels must have the same signature' end @validated[labels.hash] = labels end private def match?(a, b) a.keys.sort == b.keys.sort end def validate_symbol(key) return true if key.is_a?(Symbol) raise InvalidLabelError, "label #{key} is not a symbol" end def validate_name(key) return true unless key.to_s.start_with?('__') raise ReservedLabelError, "label #{key} must not start with __" end def validate_reserved_key(key) return true unless @reserved_labels.include?(key) raise ReservedLabelError, "#{key} is reserved" end def validate_value(key, value) return true if value.is_a?(String) || value.is_a?(Numeric) || value.is_a?(Symbol) || value.is_a?(FalseClass) || value.is_a?(TrueClass) || value.nil? raise InvalidLabelError, "#{key} does not contain a valid value (type #{value.class})" end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/mmaped_value.rb0000644000004100000410000000673113606417577025451 0ustar www-datawww-datarequire 'prometheus/client' require 'prometheus/client/mmaped_dict' require 'json' module Prometheus module Client # A float protected by a mutex backed by a per-process mmaped file. class MmapedValue VALUE_LOCK = Mutex.new @@files = {} @@pid = -1 def initialize(type, metric_name, name, labels, multiprocess_mode = '') @file_prefix = type.to_s @metric_name = metric_name @name = name @labels = labels if type == :gauge @file_prefix += '_' + multiprocess_mode.to_s end @pid = -1 @mutex = Mutex.new initialize_file end def increment(amount = 1) @mutex.synchronize do initialize_file if pid_changed? @value += amount write_value(@key, @value) @value end end def set(value) @mutex.synchronize do initialize_file if pid_changed? 
@value = value write_value(@key, @value) @value end end def get @mutex.synchronize do initialize_file if pid_changed? return @value end end def pid_changed? @pid != Process.pid end # method needs to be run in VALUE_LOCK mutex def unsafe_reinitialize_file(check_pid = true) unsafe_initialize_file if !check_pid || pid_changed? end def self.reset_and_reinitialize VALUE_LOCK.synchronize do @@pid = Process.pid @@files = {} ObjectSpace.each_object(MmapedValue).each do |v| v.unsafe_reinitialize_file(false) end end end def self.reset_on_pid_change if pid_changed? @@pid = Process.pid @@files = {} end end def self.reinitialize_on_pid_change VALUE_LOCK.synchronize do reset_on_pid_change ObjectSpace.each_object(MmapedValue, &:unsafe_reinitialize_file) end end def self.pid_changed? @@pid != Process.pid end def self.multiprocess true end private def initialize_file VALUE_LOCK.synchronize do unsafe_initialize_file end end def unsafe_initialize_file self.class.reset_on_pid_change @pid = Process.pid unless @@files.has_key?(@file_prefix) unless @file.nil? 
@file.close end mmaped_file = Helper::MmapedFile.open_exclusive_file(@file_prefix) @@files[@file_prefix] = MmapedDict.new(mmaped_file) end @file = @@files[@file_prefix] @key = rebuild_key @value = read_value(@key) end def rebuild_key labelnames = [] labelvalues = [] @labels.each do |k, v| labelnames << k labelvalues << v end [@metric_name, @name, labelnames, labelvalues].to_json end def write_value(key, val) @file.write_value(key, val) rescue StandardError => e Prometheus::Client.logger.warn("writing value to #{@file.path} failed with #{e}") Prometheus::Client.logger.debug(e.backtrace.join("\n")) end def read_value(key) @file.read_value(key) rescue StandardError => e Prometheus::Client.logger.warn("reading value from #{@file.path} failed with #{e}") Prometheus::Client.logger.debug(e.backtrace.join("\n")) 0 end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/histogram.rb0000644000004100000410000000466013606417577025006 0ustar www-datawww-datarequire 'prometheus/client/metric' require 'prometheus/client/uses_value_type' module Prometheus module Client # A histogram samples observations (usually things like request durations # or response sizes) and counts them in configurable buckets. It also # provides a sum of all observed values. class Histogram < Metric # Value represents the state of a Histogram at a given point. 
class Value < Hash include UsesValueType attr_accessor :sum, :total, :total_inf def initialize(type, name, labels, buckets) @sum = value_object(type, name, "#{name}_sum", labels) @total = value_object(type, name, "#{name}_count", labels) @total_inf = value_object(type, name, "#{name}_bucket", labels.merge(le: "+Inf")) buckets.each do |bucket| self[bucket] = value_object(type, name, "#{name}_bucket", labels.merge(le: bucket.to_s)) end end def observe(value) @sum.increment(value) @total.increment() @total_inf.increment() each_key do |bucket| self[bucket].increment() if value <= bucket end end def get() hash = {} each_key do |bucket| hash[bucket] = self[bucket].get() end hash end end # DEFAULT_BUCKETS are the default Histogram buckets. The default buckets # are tailored to broadly measure the response time (in seconds) of a # network service. (From DefBuckets client_golang) DEFAULT_BUCKETS = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10].freeze # Offer a way to manually specify buckets def initialize(name, docstring, base_labels = {}, buckets = DEFAULT_BUCKETS) raise ArgumentError, 'Unsorted buckets, typo?' unless sorted? buckets @buckets = buckets super(name, docstring, base_labels) end def type :histogram end def observe(labels, value) label_set = label_set_for(labels) synchronize { @values[label_set].observe(value) } end private def default(labels) # TODO: default function needs to know key of hash info (label names and values) Value.new(type, @name, labels, @buckets) end def sorted?(bucket) bucket.each_cons(2).all? { |i, j| i <= j } end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/support/0000755000004100000410000000000013606417577024172 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/lib/prometheus/client/support/unicorn.rb0000644000004100000410000000142413606417577026175 0ustar www-datawww-datamodule Prometheus module Client module Support module Unicorn def self.worker_pid_provider wid = worker_id if wid.nil? 
"process_id_#{Process.pid}" else "worker_id_#{wid}" end end def self.worker_id match = $0.match(/worker\[([^\]]+)\]/) if match match[1] else object_based_worker_id end end def self.object_based_worker_id return unless defined?(::Unicorn::Worker) workers = ObjectSpace.each_object(::Unicorn::Worker) return if workers.nil? workers_first = workers.first workers_first.nr unless workers_first.nil? end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/helper/0000755000004100000410000000000013606417577023735 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/lib/prometheus/client/helper/metrics_processing.rb0000644000004100000410000000302513606417577030164 0ustar www-datawww-datamodule Prometheus module Client module Helper module MetricsProcessing def self.merge_metrics(metrics) metrics.each_value do |metric| metric[:samples] = merge_samples(metric[:samples], metric[:type], metric[:multiprocess_mode]).map do |(name, labels), value| [name, labels.to_h, value] end end end def self.merge_samples(raw_samples, metric_type, multiprocess_mode) samples = {} raw_samples.each do |name, labels, value| without_pid = labels.reject { |l| l[0] == 'pid' } case metric_type when :gauge case multiprocess_mode when 'min' s = samples.fetch([name, without_pid], value) samples[[name, without_pid]] = [s, value].min when 'max' s = samples.fetch([name, without_pid], value) samples[[name, without_pid]] = [s, value].max when 'livesum' s = samples.fetch([name, without_pid], 0.0) samples[[name, without_pid]] = s + value else # all/liveall samples[[name, labels]] = value end else # Counter, Histogram and Summary. 
s = samples.fetch([name, without_pid], 0.0) samples[[name, without_pid]] = s + value end end samples end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/helper/entry_parser.rb0000644000004100000410000001013613606417577027000 0ustar www-datawww-datarequire 'prometheus/client/helper/json_parser' module Prometheus module Client module Helper module EntryParser class ParsingError < RuntimeError; end MINIMUM_SIZE = 8 START_POSITION = 8 VALUE_BYTES = 8 ENCODED_LENGTH_BYTES = 4 def used slice(0..3).unpack('l')[0] end def parts @parts ||= File.basename(filepath, '.db') .split('_') .map { |e| e.gsub(/-\d+$/, '') } # remove trailing -number end def type parts[0].to_sym end def pid (parts[2..-1] || []).join('_') end def multiprocess_mode parts[1] end def empty? size < MINIMUM_SIZE || used.zero? end def entries(ignore_errors = false) return Enumerator.new {} if empty? Enumerator.new do |yielder| used_ = used # cache used to avoid unnecessary unpack operations pos = START_POSITION # used + padding offset while pos < used_ && pos < size && pos > 0 data = slice(pos..-1) unless data raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors pos += 8 next end encoded_len, first_encoded_bytes = data.unpack('LL') if encoded_len.nil? || encoded_len.zero? || first_encoded_bytes.nil? || first_encoded_bytes.zero? 
# do not parse empty data pos += 8 next end entry_len = ENCODED_LENGTH_BYTES + encoded_len padding_len = 8 - entry_len % 8 value_offset = entry_len + padding_len # align to 8 bytes pos += value_offset if value_offset > 0 && (pos + VALUE_BYTES) <= size # if positions are safe yielder.yield data, encoded_len, value_offset, pos else raise ParsingError, "data slice is nil at pos #{pos}" unless ignore_errors end pos += VALUE_BYTES end end end def parsed_entries(ignore_errors = false) result = entries(ignore_errors).map do |data, encoded_len, value_offset, _| begin encoded, value = data.unpack(format('@4A%d@%dd', encoded_len, value_offset)) [encoded, value] rescue ArgumentError => e Prometheus::Client.logger.debug("Error processing data: #{bin_to_hex(data[0, 7])} len: #{encoded_len} value_offset: #{value_offset}") raise ParsingError, e unless ignore_errors end end result.reject!(&:nil?) if ignore_errors result end def to_metrics(metrics = {}, ignore_errors = false) parsed_entries(ignore_errors).each do |key, value| begin metric_name, name, labelnames, labelvalues = JsonParser.load(key) labelnames ||= [] labelvalues ||= [] metric = metrics.fetch(metric_name, metric_name: metric_name, help: 'Multiprocess metric', type: type, samples: []) if type == :gauge metric[:multiprocess_mode] = multiprocess_mode metric[:samples] += [[name, labelnames.zip(labelvalues) + [['pid', pid]], value]] else # The duplicates and labels are fixed in the next for. metric[:samples] += [[name, labelnames.zip(labelvalues), value]] end metrics[metric_name] = metric rescue JSON::ParserError => e raise ParsingError(e) unless ignore_errors end end metrics.reject! { |e| e.nil? 
} if ignore_errors metrics end private def bin_to_hex(s) s.each_byte.map { |b| b.to_s(16) }.join end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/helper/json_parser.rb0000644000004100000410000000052613606417577026612 0ustar www-datawww-datarequire 'json' module Prometheus module Client module Helper module JsonParser class << self if defined?(Oj) def load(s) Oj.load(s) end else def load(s) JSON.parse(s) end end end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/helper/file_locker.rb0000644000004100000410000000217713606417577026547 0ustar www-datawww-datamodule Prometheus module Client module Helper class FileLocker class << self LOCK_FILE_MUTEX = Mutex.new def lock_to_process(filepath) LOCK_FILE_MUTEX.synchronize do @file_locks ||= {} return false if @file_locks[filepath] file = File.open(filepath, 'ab') if file.flock(File::LOCK_NB | File::LOCK_EX) @file_locks[filepath] = file return true else return false end end end def unlock(filepath) LOCK_FILE_MUTEX.synchronize do @file_locks ||= {} return false unless @file_locks[filepath] @file_locks.delete(filepath).flock(File::LOCK_UN) end end def unlock_all LOCK_FILE_MUTEX.synchronize do @file_locks ||= {} @file_locks.values.each do |file| file.flock(File::LOCK_UN) end @file_locks = {} end end end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/helper/metrics_representation.rb0000644000004100000410000000265213606417577031057 0ustar www-datawww-datamodule Prometheus module Client module Helper module MetricsRepresentation METRIC_LINE = '%s%s %s'.freeze TYPE_LINE = '# TYPE %s %s'.freeze HELP_LINE = '# HELP %s %s'.freeze LABEL = '%s="%s"'.freeze SEPARATOR = ','.freeze DELIMITER = "\n".freeze REGEX = { doc: /[\n\\]/, label: /[\n\\"]/ }.freeze REPLACE = { "\n" => '\n', '\\' => '\\\\', '"' => '\"' }.freeze def self.to_text(metrics) lines = [] metrics.each do |name, metric| lines << format(HELP_LINE, name, escape(metric[:help])) lines << format(TYPE_LINE, name, 
metric[:type]) metric[:samples].each do |metric_name, labels, value| lines << metric(metric_name, format_labels(labels), value) end end # there must be a trailing delimiter (lines << nil).join(DELIMITER) end def self.metric(name, labels, value) format(METRIC_LINE, name, labels, value) end def self.format_labels(set) return if set.empty? strings = set.each_with_object([]) do |(key, value), memo| memo << format(LABEL, key, escape(value, :label)) end "{#{strings.join(SEPARATOR)}}" end def self.escape(string, format = :doc) string.to_s.gsub(REGEX[format], REPLACE) end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/helper/mmaped_file.rb0000644000004100000410000000266613606417577026536 0ustar www-datawww-datarequire 'prometheus/client/helper/entry_parser' require 'prometheus/client/helper/file_locker' require 'fast_mmaped_file' module Prometheus module Client module Helper class MmapedFile < FastMmapedFile include EntryParser attr_reader :filepath, :size def initialize(filepath) @filepath = filepath File.open(filepath, 'a+b') do |file| file.truncate(initial_mmap_file_size) if file.size < MINIMUM_SIZE @size = file.size end super(filepath) end def close munmap end private def initial_mmap_file_size Prometheus::Client.configuration.initial_mmap_file_size end public class << self def open(filepath) MmapedFile.new(filepath) end def ensure_exclusive_file(file_prefix = 'mmaped_file') (0..Float::INFINITY).lazy .map { |f_num| "#{file_prefix}_#{Prometheus::Client.pid}-#{f_num}.db" } .map { |filename| File.join(Prometheus::Client.configuration.multiprocess_files_dir, filename) } .find { |path| Helper::FileLocker.lock_to_process(path) } end def open_exclusive_file(file_prefix = 'mmaped_file') filename = Helper::MmapedFile.ensure_exclusive_file(file_prefix) open(filename) end end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/helper/plain_file.rb0000644000004100000410000000102713606417577026364 0ustar www-datawww-datarequire 
'prometheus/client/helper/entry_parser' module Prometheus module Client module Helper # Parses DB files without using mmap class PlainFile include EntryParser attr_reader :filepath def source @data ||= File.read(filepath, mode: 'rb') end def initialize(filepath) @filepath = filepath end def slice(*args) source.slice(*args) end def size source.length end end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/registry.rb0000644000004100000410000000273213606417577024657 0ustar www-datawww-data# encoding: UTF-8 require 'thread' require 'prometheus/client/counter' require 'prometheus/client/summary' require 'prometheus/client/gauge' require 'prometheus/client/histogram' module Prometheus module Client # Registry class Registry class AlreadyRegisteredError < StandardError; end def initialize @metrics = {} @mutex = Mutex.new end def register(metric) name = metric.name @mutex.synchronize do if exist?(name.to_sym) raise AlreadyRegisteredError, "#{name} has already been registered" else @metrics[name.to_sym] = metric end end metric end def counter(name, docstring, base_labels = {}) register(Counter.new(name, docstring, base_labels)) end def summary(name, docstring, base_labels = {}) register(Summary.new(name, docstring, base_labels)) end def gauge(name, docstring, base_labels = {}, multiprocess_mode = :all) register(Gauge.new(name, docstring, base_labels, multiprocess_mode)) end def histogram(name, docstring, base_labels = {}, buckets = Histogram::DEFAULT_BUCKETS) register(Histogram.new(name, docstring, base_labels, buckets)) end def exist?(name) @metrics.key?(name) end def get(name) @metrics[name.to_sym] end def metrics @metrics.values end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client/counter.rb0000644000004100000410000000110613606417577024460 0ustar www-datawww-data# encoding: UTF-8 require 'prometheus/client/metric' module Prometheus module Client # Counter is a metric that exposes merely a sum or tally of things. 
class Counter < Metric def type :counter end def increment(labels = {}, by = 1) raise ArgumentError, 'increment must be a non-negative number' if by < 0 label_set = label_set_for(labels) synchronize { @values[label_set].increment(by) } end private def default(labels) value_object(type, @name, @name, labels) end end end end prometheus-client-mmap-0.10.0/lib/prometheus/client.rb0000644000004100000410000000311013606417577022776 0ustar www-datawww-datarequire 'prometheus/client/registry' require 'prometheus/client/configuration' require 'prometheus/client/mmaped_value' module Prometheus # Client is a ruby implementation for a Prometheus compatible client. module Client class << self attr_writer :configuration def configuration @configuration ||= Configuration.new end def configure yield(configuration) end # Returns a default registry object def registry @registry ||= Registry.new end def logger configuration.logger end def pid configuration.pid_provider.call end # Resets the registry and reinitializes all metrics files. # Use case: clean up everything in specs `before` block, # to prevent leaking the state between specs which are updating metrics. def reset! @registry = nil ::Prometheus::Client::MmapedValue.reset_and_reinitialize end # With `force: false`: reinitializes metric files only for processes with the changed PID. # With `force: true`: reinitializes all metrics files. # Always keeps the registry. # Use case (`force: false`): pick up new metric files on each worker start, # without resetting already registered files for the master or previously initialized workers. def reinitialize_on_pid_change(force: false) if force ::Prometheus::Client::MmapedValue.reset_and_reinitialize else ::Prometheus::Client::MmapedValue.reinitialize_on_pid_change end end end end end prometheus-client-mmap-0.10.0/lib/prometheus.rb0000644000004100000410000000013713606417577021526 0ustar www-datawww-data# Prometheus is a generic time-series collection and computation server. 
module Prometheus end prometheus-client-mmap-0.10.0/vendor/0000755000004100000410000000000013606417577017534 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/0000755000004100000410000000000013606417577017756 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/jsmn/0000755000004100000410000000000013606417577020725 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/jsmn/test/0000755000004100000410000000000013606417577021704 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/jsmn/test/test.h0000644000004100000410000000104313606417577023032 0ustar www-datawww-data#ifndef __TEST_H__ #define __TEST_H__ static int test_passed = 0; static int test_failed = 0; /* Terminate current test with error */ #define fail() return __LINE__ /* Successful end of the test case */ #define done() return 0 /* Check single condition */ #define check(cond) do { if (!(cond)) fail(); } while (0) /* Test runner */ static void test(int (*func)(void), const char *name) { int r = func(); if (r == 0) { test_passed++; } else { test_failed++; printf("FAILED: %s (at line %d)\n", name, r); } } #endif /* __TEST_H__ */ prometheus-client-mmap-0.10.0/vendor/c/jsmn/test/tests.c0000644000004100000410000002476613606417577023231 0ustar www-datawww-data#include #include #include #include #include "test.h" #include "testutil.h" int test_empty(void) { check(parse("{}", 1, 1, JSMN_OBJECT, 0, 2, 0)); check(parse("[]", 1, 1, JSMN_ARRAY, 0, 2, 0)); check(parse("[{},{}]", 3, 3, JSMN_ARRAY, 0, 7, 2, JSMN_OBJECT, 1, 3, 0, JSMN_OBJECT, 4, 6, 0)); return 0; } int test_object(void) { check(parse("{\"a\":0}", 3, 3, JSMN_OBJECT, 0, 7, 1, JSMN_STRING, "a", 1, JSMN_PRIMITIVE, "0")); check(parse("{\"a\":[]}", 3, 3, JSMN_OBJECT, 0, 8, 1, JSMN_STRING, "a", 1, JSMN_ARRAY, 5, 7, 0)); check(parse("{\"a\":{},\"b\":{}}", 5, 5, JSMN_OBJECT, -1, -1, 2, JSMN_STRING, "a", 1, JSMN_OBJECT, -1, -1, 0, JSMN_STRING, "b", 1, JSMN_OBJECT, -1, -1, 0)); check(parse("{\n \"Day\": 26,\n \"Month\": 9,\n 
\"Year\": 12\n }", 7, 7, JSMN_OBJECT, -1, -1, 3, JSMN_STRING, "Day", 1, JSMN_PRIMITIVE, "26", JSMN_STRING, "Month", 1, JSMN_PRIMITIVE, "9", JSMN_STRING, "Year", 1, JSMN_PRIMITIVE, "12")); check(parse("{\"a\": 0, \"b\": \"c\"}", 5, 5, JSMN_OBJECT, -1, -1, 2, JSMN_STRING, "a", 1, JSMN_PRIMITIVE, "0", JSMN_STRING, "b", 1, JSMN_STRING, "c", 0)); #ifdef JSMN_STRICT check(parse("{\"a\"\n0}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\", 0}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\": {2}}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\": {2: 3}}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\": {\"a\": 2 3}}", JSMN_ERROR_INVAL, 5)); /* FIXME */ /*check(parse("{\"a\"}", JSMN_ERROR_INVAL, 2));*/ /*check(parse("{\"a\": 1, \"b\"}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{\"a\",\"b\":1}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{\"a\":1,}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{\"a\":\"b\":\"c\"}", JSMN_ERROR_INVAL, 4));*/ /*check(parse("{,}", JSMN_ERROR_INVAL, 4));*/ #endif return 0; } int test_array(void) { /* FIXME */ /*check(parse("[10}", JSMN_ERROR_INVAL, 3));*/ /*check(parse("[1,,3]", JSMN_ERROR_INVAL, 3)*/ check(parse("[10]", 2, 2, JSMN_ARRAY, -1, -1, 1, JSMN_PRIMITIVE, "10")); check(parse("{\"a\": 1]", JSMN_ERROR_INVAL, 3)); /* FIXME */ /*check(parse("[\"a\": 1]", JSMN_ERROR_INVAL, 3));*/ return 0; } int test_primitive(void) { check(parse("{\"boolVar\" : true }", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "boolVar", 1, JSMN_PRIMITIVE, "true")); check(parse("{\"boolVar\" : false }", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "boolVar", 1, JSMN_PRIMITIVE, "false")); check(parse("{\"nullVar\" : null }", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "nullVar", 1, JSMN_PRIMITIVE, "null")); check(parse("{\"intVar\" : 12}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "intVar", 1, JSMN_PRIMITIVE, "12")); check(parse("{\"floatVar\" : 12.345}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "floatVar", 1, JSMN_PRIMITIVE, "12.345")); return 0; } int test_string(void) { 
check(parse("{\"strVar\" : \"hello world\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "strVar", 1, JSMN_STRING, "hello world", 0)); check(parse("{\"strVar\" : \"escapes: \\/\\r\\n\\t\\b\\f\\\"\\\\\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "strVar", 1, JSMN_STRING, "escapes: \\/\\r\\n\\t\\b\\f\\\"\\\\", 0)); check(parse("{\"strVar\": \"\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "strVar", 1, JSMN_STRING, "", 0)); check(parse("{\"a\":\"\\uAbcD\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_STRING, "\\uAbcD", 0)); check(parse("{\"a\":\"str\\u0000\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_STRING, "str\\u0000", 0)); check(parse("{\"a\":\"\\uFFFFstr\"}", 3, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_STRING, "\\uFFFFstr", 0)); check(parse("{\"a\":[\"\\u0280\"]}", 4, 4, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_ARRAY, -1, -1, 1, JSMN_STRING, "\\u0280", 0)); check(parse("{\"a\":\"str\\uFFGFstr\"}", JSMN_ERROR_INVAL, 3)); check(parse("{\"a\":\"str\\u@FfF\"}", JSMN_ERROR_INVAL, 3)); check(parse("{{\"a\":[\"\\u028\"]}", JSMN_ERROR_INVAL, 4)); return 0; } int test_partial_string(void) { int i; int r; jsmn_parser p; jsmntok_t tok[5]; const char *js = "{\"x\": \"va\\\\ue\", \"y\": \"value y\"}"; jsmn_init(&p); for (i = 1; i <= strlen(js); i++) { r = jsmn_parse(&p, js, i, tok, sizeof(tok)/sizeof(tok[0])); if (i == strlen(js)) { check(r == 5); check(tokeq(js, tok, 5, JSMN_OBJECT, -1, -1, 2, JSMN_STRING, "x", 1, JSMN_STRING, "va\\\\ue", 0, JSMN_STRING, "y", 1, JSMN_STRING, "value y", 0)); } else { check(r == JSMN_ERROR_PART); } } return 0; } int test_partial_array(void) { #ifdef JSMN_STRICT int r; int i; jsmn_parser p; jsmntok_t tok[10]; const char *js = "[ 1, true, [123, \"hello\"]]"; jsmn_init(&p); for (i = 1; i <= strlen(js); i++) { r = jsmn_parse(&p, js, i, tok, sizeof(tok)/sizeof(tok[0])); if (i == strlen(js)) { check(r == 6); check(tokeq(js, tok, 6, JSMN_ARRAY, -1, -1, 3, JSMN_PRIMITIVE, "1", JSMN_PRIMITIVE, 
"true", JSMN_ARRAY, -1, -1, 2, JSMN_PRIMITIVE, "123", JSMN_STRING, "hello", 0)); } else { check(r == JSMN_ERROR_PART); } } #endif return 0; } int test_array_nomem(void) { int i; int r; jsmn_parser p; jsmntok_t toksmall[10], toklarge[10]; const char *js; js = " [ 1, true, [123, \"hello\"]]"; for (i = 0; i < 6; i++) { jsmn_init(&p); memset(toksmall, 0, sizeof(toksmall)); memset(toklarge, 0, sizeof(toklarge)); r = jsmn_parse(&p, js, strlen(js), toksmall, i); check(r == JSMN_ERROR_NOMEM); memcpy(toklarge, toksmall, sizeof(toksmall)); r = jsmn_parse(&p, js, strlen(js), toklarge, 10); check(r >= 0); check(tokeq(js, toklarge, 4, JSMN_ARRAY, -1, -1, 3, JSMN_PRIMITIVE, "1", JSMN_PRIMITIVE, "true", JSMN_ARRAY, -1, -1, 2, JSMN_PRIMITIVE, "123", JSMN_STRING, "hello", 0)); } return 0; } int test_unquoted_keys(void) { #ifndef JSMN_STRICT int r; jsmn_parser p; jsmntok_t tok[10]; const char *js; jsmn_init(&p); js = "key1: \"value\"\nkey2 : 123"; r = jsmn_parse(&p, js, strlen(js), tok, 10); check(r >= 0); check(tokeq(js, tok, 4, JSMN_PRIMITIVE, "key1", JSMN_STRING, "value", 0, JSMN_PRIMITIVE, "key2", JSMN_PRIMITIVE, "123")); #endif return 0; } int test_issue_22(void) { int r; jsmn_parser p; jsmntok_t tokens[128]; const char *js; js = "{ \"height\":10, \"layers\":[ { \"data\":[6,6], \"height\":10, " "\"name\":\"Calque de Tile 1\", \"opacity\":1, \"type\":\"tilelayer\", " "\"visible\":true, \"width\":10, \"x\":0, \"y\":0 }], " "\"orientation\":\"orthogonal\", \"properties\": { }, \"tileheight\":32, " "\"tilesets\":[ { \"firstgid\":1, \"image\":\"..\\/images\\/tiles.png\", " "\"imageheight\":64, \"imagewidth\":160, \"margin\":0, \"name\":\"Tiles\", " "\"properties\":{}, \"spacing\":0, \"tileheight\":32, \"tilewidth\":32 }], " "\"tilewidth\":32, \"version\":1, \"width\":10 }"; jsmn_init(&p); r = jsmn_parse(&p, js, strlen(js), tokens, 128); check(r >= 0); return 0; } int test_issue_27(void) { const char *js = "{ \"name\" : \"Jack\", \"age\" : 27 } { \"name\" : \"Anna\", "; 
check(parse(js, JSMN_ERROR_PART, 8)); return 0; } int test_input_length(void) { const char *js; int r; jsmn_parser p; jsmntok_t tokens[10]; js = "{\"a\": 0}garbage"; jsmn_init(&p); r = jsmn_parse(&p, js, 8, tokens, 10); check(r == 3); check(tokeq(js, tokens, 3, JSMN_OBJECT, -1, -1, 1, JSMN_STRING, "a", 1, JSMN_PRIMITIVE, "0")); return 0; } int test_count(void) { jsmn_parser p; const char *js; js = "{}"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 1); js = "[]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 1); js = "[[]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 2); js = "[[], []]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 3); js = "[[], []]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 3); js = "[[], [[]], [[], []]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 7); js = "[\"a\", [[], []]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 5); js = "[[], \"[], [[]]\", [[]]]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 5); js = "[1, 2, 3]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 4); js = "[1, 2, [3, \"a\"], null]"; jsmn_init(&p); check(jsmn_parse(&p, js, strlen(js), NULL, 0) == 7); return 0; } int test_nonstrict(void) { #ifndef JSMN_STRICT const char *js; js = "a: 0garbage"; check(parse(js, 2, 2, JSMN_PRIMITIVE, "a", JSMN_PRIMITIVE, "0garbage")); js = "Day : 26\nMonth : Sep\n\nYear: 12"; check(parse(js, 6, 6, JSMN_PRIMITIVE, "Day", JSMN_PRIMITIVE, "26", JSMN_PRIMITIVE, "Month", JSMN_PRIMITIVE, "Sep", JSMN_PRIMITIVE, "Year", JSMN_PRIMITIVE, "12")); //nested {s don't cause a parse error. 
js = "\"key {1\": 1234"; check(parse(js, 2, 2, JSMN_STRING, "key {1", 1, JSMN_PRIMITIVE, "1234")); #endif return 0; } int test_unmatched_brackets(void) { const char *js; js = "\"key 1\": 1234}"; check(parse(js, JSMN_ERROR_INVAL, 2)); js = "{\"key 1\": 1234"; check(parse(js, JSMN_ERROR_PART, 3)); js = "{\"key 1\": 1234}}"; check(parse(js, JSMN_ERROR_INVAL, 3)); js = "\"key 1\"}: 1234"; check(parse(js, JSMN_ERROR_INVAL, 3)); js = "{\"key {1\": 1234}"; check(parse(js, 3, 3, JSMN_OBJECT, 0, 16, 1, JSMN_STRING, "key {1", 1, JSMN_PRIMITIVE, "1234")); js = "{{\"key 1\": 1234}"; check(parse(js, JSMN_ERROR_PART, 4)); return 0; } int main(void) { test(test_empty, "test for a empty JSON objects/arrays"); test(test_object, "test for a JSON objects"); test(test_array, "test for a JSON arrays"); test(test_primitive, "test primitive JSON data types"); test(test_string, "test string JSON data types"); test(test_partial_string, "test partial JSON string parsing"); test(test_partial_array, "test partial array reading"); test(test_array_nomem, "test array reading with a smaller number of tokens"); test(test_unquoted_keys, "test unquoted keys (like in JavaScript)"); test(test_input_length, "test strings that are not null-terminated"); test(test_issue_22, "test issue #22"); test(test_issue_27, "test issue #27"); test(test_count, "test tokens count estimation"); test(test_nonstrict, "test for non-strict mode"); test(test_unmatched_brackets, "test for unmatched brackets"); printf("\nPASSED: %d\nFAILED: %d\n", test_passed, test_failed); return (test_failed > 0); } prometheus-client-mmap-0.10.0/vendor/c/jsmn/test/testutil.h0000644000004100000410000000412613606417577023735 0ustar www-datawww-data#ifndef __TEST_UTIL_H__ #define __TEST_UTIL_H__ #include "../jsmn.c" static int vtokeq(const char *s, jsmntok_t *t, int numtok, va_list ap) { if (numtok > 0) { int i, start, end, size; int type; char *value; size = -1; value = NULL; for (i = 0; i < numtok; i++) { type = va_arg(ap, int); if (type == 
JSMN_STRING) { value = va_arg(ap, char *); size = va_arg(ap, int); start = end = -1; } else if (type == JSMN_PRIMITIVE) { value = va_arg(ap, char *); start = end = size = -1; } else { start = va_arg(ap, int); end = va_arg(ap, int); size = va_arg(ap, int); value = NULL; } if (t[i].type != type) { printf("token %d type is %d, not %d\n", i, t[i].type, type); return 0; } if (start != -1 && end != -1) { if (t[i].start != start) { printf("token %d start is %d, not %d\n", i, t[i].start, start); return 0; } if (t[i].end != end ) { printf("token %d end is %d, not %d\n", i, t[i].end, end); return 0; } } if (size != -1 && t[i].size != size) { printf("token %d size is %d, not %d\n", i, t[i].size, size); return 0; } if (s != NULL && value != NULL) { const char *p = s + t[i].start; if (strlen(value) != t[i].end - t[i].start || strncmp(p, value, t[i].end - t[i].start) != 0) { printf("token %d value is %.*s, not %s\n", i, t[i].end-t[i].start, s+t[i].start, value); return 0; } } } } return 1; } static int tokeq(const char *s, jsmntok_t *tokens, int numtok, ...) { int ok; va_list args; va_start(args, numtok); ok = vtokeq(s, tokens, numtok, args); va_end(args); return ok; } static int parse(const char *s, int status, int numtok, ...) 
{ int r; int ok = 1; va_list args; jsmn_parser p; jsmntok_t *t = malloc(numtok * sizeof(jsmntok_t)); jsmn_init(&p); r = jsmn_parse(&p, s, strlen(s), t, numtok); if (r != status) { printf("status is %d, not %d\n", r, status); return 0; } if (status >= 0) { va_start(args, numtok); ok = vtokeq(s, t, numtok, args); va_end(args); } free(t); return ok; } #endif /* __TEST_UTIL_H__ */ prometheus-client-mmap-0.10.0/vendor/c/jsmn/Makefile0000644000004100000410000000160213606417577022364 0ustar www-datawww-data# You can put your build options here -include config.mk all: libjsmn.a libjsmn.a: jsmn.o $(AR) rc $@ $^ %.o: %.c jsmn.h $(CC) -c $(CFLAGS) $< -o $@ test: test_default test_strict test_links test_strict_links test_default: test/tests.c $(CC) $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ test_strict: test/tests.c $(CC) -DJSMN_STRICT=1 $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ test_links: test/tests.c $(CC) -DJSMN_PARENT_LINKS=1 $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ test_strict_links: test/tests.c $(CC) -DJSMN_STRICT=1 -DJSMN_PARENT_LINKS=1 $(CFLAGS) $(LDFLAGS) $< -o test/$@ ./test/$@ jsmn_test.o: jsmn_test.c libjsmn.a simple_example: example/simple.o libjsmn.a $(CC) $(LDFLAGS) $^ -o $@ jsondump: example/jsondump.o libjsmn.a $(CC) $(LDFLAGS) $^ -o $@ clean: rm -f *.o example/*.o rm -f *.a *.so rm -f simple_example rm -f jsondump .PHONY: all clean test prometheus-client-mmap-0.10.0/vendor/c/jsmn/README.md0000644000004100000410000001275713606417577022220 0ustar www-datawww-dataJSMN ==== [![Build Status](https://travis-ci.org/zserge/jsmn.svg?branch=master)](https://travis-ci.org/zserge/jsmn) jsmn (pronounced like 'jasmine') is a minimalistic JSON parser in C. It can be easily integrated into resource-limited or embedded projects. 
You can find more information about JSON format at [json.org][1] Library sources are available at https://github.com/zserge/jsmn The web page with some information about jsmn can be found at [http://zserge.com/jsmn.html][2] Philosophy ---------- Most JSON parsers offer you a bunch of functions to load JSON data, parse it and extract any value by its name. jsmn proves that checking the correctness of every JSON packet or allocating temporary objects to store parsed JSON fields often is an overkill. JSON format itself is extremely simple, so why should we complicate it? jsmn is designed to be **robust** (it should work fine even with erroneous data), **fast** (it should parse data on the fly), **portable** (no superfluous dependencies or non-standard C extensions). And of course, **simplicity** is a key feature - simple code style, simple algorithm, simple integration into other projects. Features -------- * compatible with C89 * no dependencies (even libc!) * highly portable (tested on x86/amd64, ARM, AVR) * about 200 lines of code * extremely small code footprint * API contains only 2 functions * no dynamic memory allocation * incremental single-pass parsing * library code is covered with unit-tests Design ------ The rudimentary jsmn object is a **token**. Let's consider a JSON string: '{ "name" : "Jack", "age" : 27 }' It holds the following tokens: * Object: `{ "name" : "Jack", "age" : 27}` (the whole object) * Strings: `"name"`, `"Jack"`, `"age"` (keys and some values) * Number: `27` In jsmn, tokens do not hold any data, but point to token boundaries in JSON string instead. In the example above jsmn will create tokens like: Object [0..31], String [3..7], String [12..16], String [20..23], Number [27..29]. Every jsmn token has a type, which indicates the type of corresponding JSON token. 
jsmn supports the following token types: * Object - a container of key-value pairs, e.g.: `{ "foo":"bar", "x":0.3 }` * Array - a sequence of values, e.g.: `[ 1, 2, 3 ]` * String - a quoted sequence of chars, e.g.: `"foo"` * Primitive - a number, a boolean (`true`, `false`) or `null` Besides start/end positions, jsmn tokens for complex types (like arrays or objects) also contain a number of child items, so you can easily follow object hierarchy. This approach provides enough information for parsing any JSON data and makes it possible to use zero-copy techniques. Install ------- To clone the repository you should have Git installed. Just run: $ git clone https://github.com/zserge/jsmn Repository layout is simple: jsmn.c and jsmn.h are library files, tests are in the jsmn\_test.c, you will also find README, LICENSE and Makefile files inside. To build the library, run `make`. It is also recommended to run `make test`. Let me know, if some tests fail. If build was successful, you should get a `libjsmn.a` library. The header file you should include is called `"jsmn.h"`. API --- Token types are described by `jsmntype_t`: typedef enum { JSMN_UNDEFINED = 0, JSMN_OBJECT = 1, JSMN_ARRAY = 2, JSMN_STRING = 3, JSMN_PRIMITIVE = 4 } jsmntype_t; **Note:** Unlike JSON data types, primitive tokens are not divided into numbers, booleans and null, because one can easily tell the type using the first character: * 't', 'f' - boolean * 'n' - null * '-', '0'..'9' - number Token is an object of `jsmntok_t` type: typedef struct { jsmntype_t type; // Token type int start; // Token start position int end; // Token end position int size; // Number of child (nested) tokens } jsmntok_t; **Note:** string tokens point to the first character after the opening quote and the previous symbol before final quote. This was made to simplify string extraction from JSON data. All job is done by `jsmn_parser` object. 
You can initialize a new parser using: jsmn_parser parser; jsmntok_t tokens[10]; jsmn_init(&parser); // js - pointer to JSON string // tokens - an array of tokens available // 10 - number of tokens available jsmn_parse(&parser, js, strlen(js), tokens, 10); This will create a parser, and then it tries to parse up to 10 JSON tokens from the `js` string. A non-negative return value of `jsmn_parse` is the number of tokens actually used by the parser. Passing NULL instead of the tokens array would not store parsing results, but instead the function will return the value of tokens needed to parse the given string. This can be useful if you don't know yet how many tokens to allocate. If something goes wrong, you will get an error. Error will be one of these: * `JSMN_ERROR_INVAL` - bad token, JSON string is corrupted * `JSMN_ERROR_NOMEM` - not enough tokens, JSON string is too large * `JSMN_ERROR_PART` - JSON string is too short, expecting more JSON data If you get `JSON_ERROR_NOMEM`, you can re-allocate more tokens and call `jsmn_parse` once more. If you read json data from the stream, you can periodically call `jsmn_parse` and check if return value is `JSON_ERROR_PART`. You will get this error until you reach the end of JSON data. Other info ---------- This software is distributed under [MIT license](http://www.opensource.org/licenses/mit-license.php), so feel free to integrate it in your commercial products. [1]: http://www.json.org/ [2]: http://zserge.com/jsmn.html prometheus-client-mmap-0.10.0/vendor/c/jsmn/library.json0000644000004100000410000000056013606417577023265 0ustar www-datawww-data{ "name": "jsmn", "keywords": "json", "description": "Minimalistic JSON parser/tokenizer in C. 
It can be easily integrated into resource-limited or embedded projects", "repository": { "type": "git", "url": "https://github.com/zserge/jsmn.git" }, "frameworks": "*", "platforms": "*", "examples": [ "example/*.c" ], "exclude": "test" } prometheus-client-mmap-0.10.0/vendor/c/jsmn/LICENSE0000644000004100000410000000204513606417577021733 0ustar www-datawww-dataCopyright (c) 2010 Serge A. Zaitsev Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. prometheus-client-mmap-0.10.0/vendor/c/jsmn/jsmn.h0000644000004100000410000000313613606417577022050 0ustar www-datawww-data#ifndef __JSMN_H_ #define __JSMN_H_ #include #ifdef __cplusplus extern "C" { #endif /** * JSON type identifier. 
Basic types are: * o Object * o Array * o String * o Other primitive: number, boolean (true/false) or null */ typedef enum { JSMN_UNDEFINED = 0, JSMN_OBJECT = 1, JSMN_ARRAY = 2, JSMN_STRING = 3, JSMN_PRIMITIVE = 4 } jsmntype_t; enum jsmnerr { /* Not enough tokens were provided */ JSMN_ERROR_NOMEM = -1, /* Invalid character inside JSON string */ JSMN_ERROR_INVAL = -2, /* The string is not a full JSON packet, more bytes expected */ JSMN_ERROR_PART = -3 }; /** * JSON token description. * type type (object, array, string etc.) * start start position in JSON data string * end end position in JSON data string */ typedef struct { jsmntype_t type; int start; int end; int size; #ifdef JSMN_PARENT_LINKS int parent; #endif } jsmntok_t; /** * JSON parser. Contains an array of token blocks available. Also stores * the string being parsed now and current position in that string */ typedef struct { unsigned int pos; /* offset in the JSON string */ unsigned int toknext; /* next token to allocate */ int toksuper; /* superior token node, e.g parent object or array */ } jsmn_parser; /** * Create JSON parser over an array of tokens */ void jsmn_init(jsmn_parser *parser); /** * Run JSON parser. It parses a JSON data string into and array of tokens, each describing * a single JSON object. */ int jsmn_parse(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, unsigned int num_tokens); #ifdef __cplusplus } #endif #endif /* __JSMN_H_ */ prometheus-client-mmap-0.10.0/vendor/c/jsmn/example/0000755000004100000410000000000013606417577022360 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/jsmn/example/jsondump.c0000644000004100000410000000541713606417577024372 0ustar www-datawww-data#include #include #include #include #include #include "../jsmn.h" /* Function realloc_it() is a wrapper function for standart realloc() * with one difference - it frees old memory pointer in case of realloc * failure. 
Thus, DO NOT use old data pointer in anyway after call to * realloc_it(). If your code has some kind of fallback algorithm if * memory can't be re-allocated - use standart realloc() instead. */ static inline void *realloc_it(void *ptrmem, size_t size) { void *p = realloc(ptrmem, size); if (!p) { free (ptrmem); fprintf(stderr, "realloc(): errno=%d\n", errno); } return p; } /* * An example of reading JSON from stdin and printing its content to stdout. * The output looks like YAML, but I'm not sure if it's really compatible. */ static int dump(const char *js, jsmntok_t *t, size_t count, int indent) { int i, j, k; if (count == 0) { return 0; } if (t->type == JSMN_PRIMITIVE) { printf("%.*s", t->end - t->start, js+t->start); return 1; } else if (t->type == JSMN_STRING) { printf("'%.*s'", t->end - t->start, js+t->start); return 1; } else if (t->type == JSMN_OBJECT) { printf("\n"); j = 0; for (i = 0; i < t->size; i++) { for (k = 0; k < indent; k++) printf(" "); j += dump(js, t+1+j, count-j, indent+1); printf(": "); j += dump(js, t+1+j, count-j, indent+1); printf("\n"); } return j+1; } else if (t->type == JSMN_ARRAY) { j = 0; printf("\n"); for (i = 0; i < t->size; i++) { for (k = 0; k < indent-1; k++) printf(" "); printf(" - "); j += dump(js, t+1+j, count-j, indent+1); printf("\n"); } return j+1; } return 0; } int main() { int r; int eof_expected = 0; char *js = NULL; size_t jslen = 0; char buf[BUFSIZ]; jsmn_parser p; jsmntok_t *tok; size_t tokcount = 2; /* Prepare parser */ jsmn_init(&p); /* Allocate some tokens as a start */ tok = malloc(sizeof(*tok) * tokcount); if (tok == NULL) { fprintf(stderr, "malloc(): errno=%d\n", errno); return 3; } for (;;) { /* Read another chunk */ r = fread(buf, 1, sizeof(buf), stdin); if (r < 0) { fprintf(stderr, "fread(): %d, errno=%d\n", r, errno); return 1; } if (r == 0) { if (eof_expected != 0) { return 0; } else { fprintf(stderr, "fread(): unexpected EOF\n"); return 2; } } js = realloc_it(js, jslen + r + 1); if (js == NULL) { return 3; } 
strncpy(js + jslen, buf, r); jslen = jslen + r; again: r = jsmn_parse(&p, js, jslen, tok, tokcount); if (r < 0) { if (r == JSMN_ERROR_NOMEM) { tokcount = tokcount * 2; tok = realloc_it(tok, sizeof(*tok) * tokcount); if (tok == NULL) { return 3; } goto again; } } else { dump(js, tok, p.toknext, 0); eof_expected = 1; } } return EXIT_SUCCESS; } prometheus-client-mmap-0.10.0/vendor/c/jsmn/example/simple.c0000644000004100000410000000424413606417577024021 0ustar www-datawww-data#include #include #include #include "../jsmn.h" /* * A small example of jsmn parsing when JSON structure is known and number of * tokens is predictable. */ static const char *JSON_STRING = "{\"user\": \"johndoe\", \"admin\": false, \"uid\": 1000,\n " "\"groups\": [\"users\", \"wheel\", \"audio\", \"video\"]}"; static int jsoneq(const char *json, jsmntok_t *tok, const char *s) { if (tok->type == JSMN_STRING && (int) strlen(s) == tok->end - tok->start && strncmp(json + tok->start, s, tok->end - tok->start) == 0) { return 0; } return -1; } int main() { int i; int r; jsmn_parser p; jsmntok_t t[128]; /* We expect no more than 128 tokens */ jsmn_init(&p); r = jsmn_parse(&p, JSON_STRING, strlen(JSON_STRING), t, sizeof(t)/sizeof(t[0])); if (r < 0) { printf("Failed to parse JSON: %d\n", r); return 1; } /* Assume the top-level element is an object */ if (r < 1 || t[0].type != JSMN_OBJECT) { printf("Object expected\n"); return 1; } /* Loop over all keys of the root object */ for (i = 1; i < r; i++) { if (jsoneq(JSON_STRING, &t[i], "user") == 0) { /* We may use strndup() to fetch string value */ printf("- User: %.*s\n", t[i+1].end-t[i+1].start, JSON_STRING + t[i+1].start); i++; } else if (jsoneq(JSON_STRING, &t[i], "admin") == 0) { /* We may additionally check if the value is either "true" or "false" */ printf("- Admin: %.*s\n", t[i+1].end-t[i+1].start, JSON_STRING + t[i+1].start); i++; } else if (jsoneq(JSON_STRING, &t[i], "uid") == 0) { /* We may want to do strtol() here to get numeric value */ printf("- 
UID: %.*s\n", t[i+1].end-t[i+1].start, JSON_STRING + t[i+1].start); i++; } else if (jsoneq(JSON_STRING, &t[i], "groups") == 0) { int j; printf("- Groups:\n"); if (t[i+1].type != JSMN_ARRAY) { continue; /* We expect groups to be an array of strings */ } for (j = 0; j < t[i+1].size; j++) { jsmntok_t *g = &t[i+j+2]; printf(" * %.*s\n", g->end - g->start, JSON_STRING + g->start); } i += t[i+1].size + 1; } else { printf("Unexpected key: %.*s\n", t[i].end-t[i].start, JSON_STRING + t[i].start); } } return EXIT_SUCCESS; } prometheus-client-mmap-0.10.0/vendor/c/jsmn/jsmn.c0000644000004100000410000001725313606417577022050 0ustar www-datawww-data#include "jsmn.h" /** * Allocates a fresh unused token from the token pull. */ static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *tok; if (parser->toknext >= num_tokens) { return NULL; } tok = &tokens[parser->toknext++]; tok->start = tok->end = -1; tok->size = 0; #ifdef JSMN_PARENT_LINKS tok->parent = -1; #endif return tok; } /** * Fills token type and boundaries. */ static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type, int start, int end) { token->type = type; token->start = start; token->end = end; token->size = 0; } /** * Fills next available token with JSON primitive. 
*/ static int jsmn_parse_primitive(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *token; int start; start = parser->pos; for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { switch (js[parser->pos]) { #ifndef JSMN_STRICT /* In strict mode primitive must be followed by "," or "}" or "]" */ case ':': #endif case '\t' : case '\r' : case '\n' : case ' ' : case ',' : case ']' : case '}' : goto found; } if (js[parser->pos] < 32 || js[parser->pos] >= 127) { parser->pos = start; return JSMN_ERROR_INVAL; } } #ifdef JSMN_STRICT /* In strict mode primitive must be followed by a comma/object/array */ parser->pos = start; return JSMN_ERROR_PART; #endif found: if (tokens == NULL) { parser->pos--; return 0; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) { parser->pos = start; return JSMN_ERROR_NOMEM; } jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos); #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif parser->pos--; return 0; } /** * Fills next token with JSON string. 
*/ static int jsmn_parse_string(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *token; int start = parser->pos; parser->pos++; /* Skip starting quote */ for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { char c = js[parser->pos]; /* Quote: end of string */ if (c == '\"') { if (tokens == NULL) { return 0; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) { parser->pos = start; return JSMN_ERROR_NOMEM; } jsmn_fill_token(token, JSMN_STRING, start+1, parser->pos); #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif return 0; } /* Backslash: Quoted symbol expected */ if (c == '\\' && parser->pos + 1 < len) { int i; parser->pos++; switch (js[parser->pos]) { /* Allowed escaped symbols */ case '\"': case '/' : case '\\' : case 'b' : case 'f' : case 'r' : case 'n' : case 't' : break; /* Allows escaped symbol \uXXXX */ case 'u': parser->pos++; for(i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0'; i++) { /* If it isn't a hex character we have an error */ if(!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */ (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */ (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */ parser->pos = start; return JSMN_ERROR_INVAL; } parser->pos++; } parser->pos--; break; /* Unexpected symbol */ default: parser->pos = start; return JSMN_ERROR_INVAL; } } } parser->pos = start; return JSMN_ERROR_PART; } /** * Parse JSON string and fill tokens. 
*/
/*
 * Run the JSON parser over js (length len), filling tokens (capacity
 * num_tokens). Returns the number of tokens produced/needed, or a negative
 * JSMN_ERROR_* code. Passing tokens == NULL only counts the tokens required.
 * Parsing state lives in parser, so this may be called incrementally.
 */
int jsmn_parse(jsmn_parser *parser, const char *js, size_t len,
		jsmntok_t *tokens, unsigned int num_tokens) {
	int r;
	int i;
	jsmntok_t *token;
	/* Tokens already emitted by previous (incremental) calls */
	int count = parser->toknext;

	for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
		char c;
		jsmntype_t type;

		c = js[parser->pos];
		switch (c) {
		case '{': case '[':
			count++;
			if (tokens == NULL) {
				break;
			}
			token = jsmn_alloc_token(parser, tokens, num_tokens);
			if (token == NULL)
				return JSMN_ERROR_NOMEM;
			if (parser->toksuper != -1) {
				tokens[parser->toksuper].size++;
#ifdef JSMN_PARENT_LINKS
				token->parent = parser->toksuper;
#endif
			}
			token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY);
			token->start = parser->pos;
			/* The new container becomes the current parent */
			parser->toksuper = parser->toknext - 1;
			break;
		case '}': case ']':
			if (tokens == NULL)
				break;
			type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY);
#ifdef JSMN_PARENT_LINKS
			if (parser->toknext < 1) {
				return JSMN_ERROR_INVAL;
			}
			token = &tokens[parser->toknext - 1];
			for (;;) {
				/* An open token (start set, end unset) must match the bracket type */
				if (token->start != -1 && token->end == -1) {
					if (token->type != type) {
						return JSMN_ERROR_INVAL;
					}
					token->end = parser->pos + 1;
					parser->toksuper = token->parent;
					break;
				}
				if (token->parent == -1) {
					if(token->type != type || parser->toksuper == -1) {
						return JSMN_ERROR_INVAL;
					}
					break;
				}
				token = &tokens[token->parent];
			}
#else
			/* Without parent links: scan backwards for the open container */
			for (i = parser->toknext - 1; i >= 0; i--) {
				token = &tokens[i];
				if (token->start != -1 && token->end == -1) {
					if (token->type != type) {
						return JSMN_ERROR_INVAL;
					}
					parser->toksuper = -1;
					token->end = parser->pos + 1;
					break;
				}
			}
			/* Error if unmatched closing bracket */
			if (i == -1) return JSMN_ERROR_INVAL;
			/* Restore toksuper to the nearest still-open token */
			for (; i >= 0; i--) {
				token = &tokens[i];
				if (token->start != -1 && token->end == -1) {
					parser->toksuper = i;
					break;
				}
			}
#endif
			break;
		case '\"':
			r = jsmn_parse_string(parser, js, len, tokens, num_tokens);
			if (r < 0) return r;
			count++;
			if (parser->toksuper != -1 && tokens != NULL)
				tokens[parser->toksuper].size++;
			break;
		case '\t' : case '\r' : case '\n' : case ' ':
			/* Skip whitespace between tokens */
			break;
		case ':':
			/* Value follows a key: the key token becomes the parent */
			parser->toksuper = parser->toknext - 1;
			break;
		case ',':
			if (tokens != NULL && parser->toksuper != -1 &&
					tokens[parser->toksuper].type != JSMN_ARRAY &&
					tokens[parser->toksuper].type != JSMN_OBJECT) {
				/* After a value, pop back up to the enclosing container */
#ifdef JSMN_PARENT_LINKS
				parser->toksuper = tokens[parser->toksuper].parent;
#else
				for (i = parser->toknext - 1; i >= 0; i--) {
					if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) {
						if (tokens[i].start != -1 && tokens[i].end == -1) {
							parser->toksuper = i;
							break;
						}
					}
				}
#endif
			}
			break;
#ifdef JSMN_STRICT
		/* In strict mode primitives are: numbers and booleans */
		case '-': case '0': case '1' : case '2': case '3' : case '4':
		case '5': case '6': case '7' : case '8': case '9':
		case 't': case 'f': case 'n' :
			/* And they must not be keys of the object */
			if (tokens != NULL && parser->toksuper != -1) {
				jsmntok_t *t = &tokens[parser->toksuper];
				if (t->type == JSMN_OBJECT ||
						(t->type == JSMN_STRING && t->size != 0)) {
					return JSMN_ERROR_INVAL;
				}
			}
#else
		/* In non-strict mode every unquoted value is a primitive */
		default:
#endif
			r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens);
			if (r < 0) return r;
			count++;
			if (parser->toksuper != -1 && tokens != NULL)
				tokens[parser->toksuper].size++;
			break;

#ifdef JSMN_STRICT
		/* Unexpected char in strict mode */
		default:
			return JSMN_ERROR_INVAL;
#endif
		}
	}

	if (tokens != NULL) {
		for (i = parser->toknext - 1; i >= 0; i--) {
			/* Unmatched opened object or array */
			if (tokens[i].start != -1 && tokens[i].end == -1) {
				return JSMN_ERROR_PART;
			}
		}
	}

	return count;
}

/**
 * Creates a new parser based over a given buffer with an array of tokens
 * available.
*/ void jsmn_init(jsmn_parser *parser) { parser->pos = 0; parser->toknext = 0; parser->toksuper = -1; } prometheus-client-mmap-0.10.0/vendor/c/hashmap/0000755000004100000410000000000013606417577021377 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/hashmap/test/0000755000004100000410000000000013606417577022356 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/hashmap/test/Makefile0000644000004100000410000000057313606417577024023 0ustar www-datawww-dataTOP_DIR := $(CURDIR)/.. SRC_DIR := $(TOP_DIR)/src TARGET_EXEC := hashmap_test SOURCES := $(SRC_DIR)/hashmap.c hashmap_test.c CFLAGS += -O0 -g -ggdb -Wall -Wunused -Werror -I$(SRC_DIR) -DHASHMAP_METRICS OBJS := $(SOURCES:%.c=%.o) test: $(TARGET_EXEC) $(TARGET_EXEC): $(OBJS) @echo EXEC $@ ; \ $(CC) -o $@ $(OBJS) clean: rm -f $(OBJS) $(TARGET_EXEC) .PHONY: test clean prometheus-client-mmap-0.10.0/vendor/c/hashmap/test/hashmap_test.c0000644000004100000410000003146513606417577025213 0ustar www-datawww-data/* * Copyright (c) 2016-2017 David Leeds * * Hashmap is free software; you can redistribute it and/or modify * it under the terms of the MIT license. See LICENSE for details. 
*/ #include #include #include #include #include #include #include #include #define ARRAY_LEN(array) (sizeof(array) / sizeof(array[0])) #define TEST_NUM_KEYS 196607 /* Results in max load factor */ #define TEST_KEY_STR_LEN 32 void **keys_str_random; void **keys_str_sequential; void **keys_int_random; void **keys_int_sequential; struct hashmap str_map; struct hashmap int_map; struct test { const char *name; const char *description; bool (*run)(struct hashmap *map, void **keys); bool pre_load; }; /* * Test type-specific generation macros */ HASHMAP_FUNCS_DECLARE(test, const void, void) HASHMAP_FUNCS_CREATE(test, const void, void) uint64_t test_time_us(void) { struct timespec now; if (clock_gettime(CLOCK_MONOTONIC, &now)) { assert(0); } return ((uint64_t)now.tv_sec) * 1000000 + (uint64_t)(now.tv_nsec / 1000); } void **test_keys_alloc(size_t num) { void **keys; keys = (void **)calloc(num, sizeof(void *)); if (!keys) { printf("malloc failed\n"); exit(1); } return keys; } void *test_key_alloc_random_str(void) { size_t i; unsigned num; char *key; key = (char *)malloc(TEST_KEY_STR_LEN + 1); if (!key) { printf("malloc failed\n"); exit(1); } for (i = 0; i < TEST_KEY_STR_LEN; ++i) { num = random(); num = (num % 96) + 32; /* ASCII printable only */ key[i] = (char)num; } key[TEST_KEY_STR_LEN] = '\0'; return key; } void *test_key_alloc_random_int(void) { uint64_t *key; key = (uint64_t *)malloc(sizeof(*key)); if (!key) { printf("malloc failed\n"); exit(1); } /* RAND_MAX is not guaranteed to be more than 32K */ *key = (uint64_t)(random() & 0xffff) << 48 | (uint64_t)(random() & 0xffff) << 32 | (uint64_t)(random() & 0xffff) << 16 | (uint64_t)(random() & 0xffff); return key; } void *test_key_alloc_sequential_str(size_t index) { char *key; key = (char *)malloc(TEST_KEY_STR_LEN + 1); if (!key) { printf("malloc failed\n"); exit(1); } snprintf(key, TEST_KEY_STR_LEN + 1, "sequential key! 
%010zu", index); return key; } void *test_key_alloc_sequential_int(size_t index) { uint64_t *key; key = (uint64_t *)malloc(sizeof(*key)); if (!key) { printf("malloc failed\n"); exit(1); } *key = index; return key; } void test_keys_generate(void) { size_t i; srandom(99); /* Use reproducible random sequences */ keys_str_random = test_keys_alloc(TEST_NUM_KEYS + 1); keys_str_sequential = test_keys_alloc(TEST_NUM_KEYS + 1); keys_int_random = test_keys_alloc(TEST_NUM_KEYS + 1); keys_int_sequential = test_keys_alloc(TEST_NUM_KEYS + 1); for (i = 0; i < TEST_NUM_KEYS; ++i) { keys_str_random[i] = test_key_alloc_random_str(); keys_str_sequential[i] = test_key_alloc_sequential_str(i); keys_int_random[i] = test_key_alloc_random_int(); keys_int_sequential[i] = test_key_alloc_sequential_int(i); } keys_str_random[i] = NULL; keys_str_sequential[i] = NULL; keys_int_random[i] = NULL; keys_int_sequential[i] = NULL; } void test_load_keys(struct hashmap *map, void **keys) { void **key; for (key = keys; *key; ++key) { if (!test_hashmap_put(map, *key, *key)) { printf("hashmap_put() failed"); exit(1); } } } void test_reset_map(struct hashmap *map) { hashmap_reset(map); } void test_print_stats(struct hashmap *map, const char *label) { printf("Hashmap stats: %s\n", label); printf(" # entries: %zu\n", hashmap_size(map)); printf(" Table size: %zu\n", map->table_size); printf(" Load factor: %.4f\n", hashmap_load_factor(map)); printf(" Collisions mean: %.4f\n", hashmap_collisions_mean(map)); printf(" Collisions variance: %.4f\n", hashmap_collisions_variance(map)); } bool test_run(struct hashmap *map, void **keys, const struct test *t) { bool success; uint64_t time_us; assert(t != NULL); assert(t->name != NULL); assert(t->run != NULL); if (t->pre_load) { printf("Pre-loading keys..."); test_load_keys(map, keys); printf("done\n"); } printf("Running...\n"); time_us = test_time_us(); success = t->run(map, keys); time_us = test_time_us() - time_us; if (success) { printf("Completed successfully\n"); } 
else { printf("Failed\n"); } printf("Run time: %llu microseconds\n", (long long unsigned)time_us); test_print_stats(map, t->name); test_reset_map(map); return success; } bool test_run_all(struct hashmap *map, void **keys, const struct test *tests, size_t num_tests, const char *env) { const struct test *t; size_t num_failed = 0; printf("\n**************************************************\n"); printf("Starting test series:\n"); printf(" %s\n", env); printf("**************************************************\n\n"); for (t = tests; t < &tests[num_tests]; ++t) { printf("\n**************************************************" "\n"); printf("Test %02u: %s\n", (unsigned)(t - tests) + 1, t->name); if (t->description) { printf(" Description: %s\n", t->description); } printf("\n"); if (!test_run(map, keys, t)) { ++num_failed; } } printf("\n**************************************************\n"); printf("Test results:\n"); printf(" Passed: %zu\n", num_tests - num_failed); printf(" Failed: %zu\n", num_failed); printf("**************************************************\n"); return (num_failed == 0); } size_t test_hash_uint64(const void *key) { const uint8_t *byte = (const uint8_t *)key; uint8_t i; size_t hash = 0; for (i = 0; i < sizeof(uint64_t); ++i, ++byte) { hash += *byte; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); return hash; } int test_compare_uint64(const void *a, const void *b) { return *(int64_t *)a - *(int64_t *)b; } bool test_put(struct hashmap *map, void **keys) { void **key; void *data; for (key = keys; *key; ++key) { data = test_hashmap_put(map, *key, *key); if (!data) { printf("malloc failed\n"); exit(1); } if (data != *key) { printf("duplicate key found\n"); return false; } } return true; } bool test_put_existing(struct hashmap *map, void **keys) { void **key; void *data; int temp_data = 99; for (key = keys; *key; ++key) { data = hashmap_put(map, *key, &temp_data); if (!data) { printf("malloc 
failed\n"); exit(1); } if (data != *key) { printf("did not return existing data\n"); return false; } } return true; } bool test_get(struct hashmap *map, void **keys) { void **key; void *data; for (key = keys; *key; ++key) { data = test_hashmap_get(map, *key); if (!data) { printf("entry not found\n"); return false; } if (data != *key) { printf("got wrong entry\n"); return false; } } return true; } bool test_get_nonexisting(struct hashmap *map, void **keys) { void **key; void *data; const char *fake_key = "test_get_nonexisting fake key!"; for (key = keys; *key; ++key) { data = hashmap_get(map, fake_key); if (data) { printf("unexpected entry found\n"); return false; } } return true; } bool test_remove(struct hashmap *map, void **keys) { void **key; void *data; for (key = keys; *key; ++key) { data = test_hashmap_remove(map, *key); if (!data) { printf("entry not found\n"); return false; } if (data != *key) { printf("removed wrong entry\n"); return false; } } return true; } bool test_put_remove(struct hashmap *map, void **keys) { size_t i = 0; void **key; void *data; if (!test_put(map, keys)) { return false; } for (key = keys; *key; ++key) { if (i++ >= TEST_NUM_KEYS / 2) { break; } data = test_hashmap_remove(map, *key); if (!data) { printf("key not found\n"); return false; } if (data != *key) { printf("removed wrong entry\n"); return false; } } test_print_stats(map, "test_put_remove done"); i = 0; for (key = keys; *key; ++key) { if (i++ >= TEST_NUM_KEYS / 2) { break; } data = test_hashmap_put(map, *key, *key); if (!data) { printf("malloc failed\n"); exit(1); } if (data != *key) { printf("duplicate key found\n"); return false; } } return true; } bool test_iterate(struct hashmap *map, void **keys) { size_t i = 0; struct hashmap_iter *iter = hashmap_iter(map); for (; iter; iter = hashmap_iter_next(map, iter)) { ++i; } if (i != TEST_NUM_KEYS) { printf("did not iterate through all entries: " "observed %zu, expected %u\n", i, TEST_NUM_KEYS); return false; } return true; } bool 
test_iterate_remove(struct hashmap *map, void **keys) { size_t i = 0; struct hashmap_iter *iter = hashmap_iter(map); const void *key; while (iter) { ++i; key = test_hashmap_iter_get_key(iter); if (test_hashmap_get(map, key) != key) { printf("invalid iterator on entry #%zu\n", i); return false; } iter = hashmap_iter_remove(map, iter); if (test_hashmap_get(map, key) != NULL) { printf("iter_remove failed on entry #%zu\n", i); return false; } } if (i != TEST_NUM_KEYS) { printf("did not iterate through all entries: " "observed %zu, expected %u\n", i, TEST_NUM_KEYS); return false; } return true; } struct test_foreach_arg { struct hashmap *map; size_t i; }; int test_foreach_callback(const void *key, void *data, void *arg) { struct test_foreach_arg *state = (struct test_foreach_arg *)arg; if (state->i & 1) { /* Remove every other key */ if (!test_hashmap_remove(state->map, key)) { printf("could not remove expected key\n"); return -1; } } ++state->i; return 0; } bool test_foreach(struct hashmap *map, void **keys) { struct test_foreach_arg arg = { map, 1 }; size_t size = hashmap_size(map); if (test_hashmap_foreach(map, test_foreach_callback, &arg) < 0) { return false; } if (hashmap_size(map) != size / 2) { printf("foreach delete did not remove expected # of entries: " "contains %zu vs. 
expected %zu\n", hashmap_size(map), size / 2); return false; } return true; } bool test_clear(struct hashmap *map, void **keys) { hashmap_clear(map); return true; } bool test_reset(struct hashmap *map, void **keys) { hashmap_reset(map); return true; } const struct test const tests[] = { { .name = "put performance", .description = "put new hash keys", .run = test_put }, { .name = "put existing performance", .description = "attempt to put existing hash keys", .run = test_put_existing, .pre_load = true }, { .name = "get existing performance", .description = "get existing hash keys", .run = test_get, .pre_load = true }, { .name = "get non-existing performance", .description = "get nonexistent hash keys", .run = test_get_nonexisting, .pre_load = true }, { .name = "remove performance", .description = "remove hash keys", .run = test_remove, .pre_load = true }, { .name = "mixed put/remove performance", .description = "put, remove 1/2, then put them back", .run = test_put_remove }, { .name = "iterate performance", .description = "iterate through entries", .run = test_iterate, .pre_load = true }, { .name = "iterate remove all", .description = "iterate and remove all entries", .run = test_iterate_remove, .pre_load = true }, { .name = "removal in foreach", .description = "iterate and delete 1/2 using hashmap_foreach", .run = test_foreach, .pre_load = true }, { .name = "clear performance", .description = "clear entries", .run = test_clear, .pre_load = true }, { .name = "reset performance", .description = "reset entries", .run = test_reset, .pre_load = true } }; /* * Main function */ int main(int argc, char **argv) { bool success = true; /* Initialize */ printf("Initializing hash maps..."); if (hashmap_init(&str_map, hashmap_hash_string, hashmap_compare_string, 0) < 0) { success = false; } /* hashmap_set_key_alloc_funcs(&str_map, hashmap_alloc_key_string, free); */ if (hashmap_init(&int_map, test_hash_uint64, test_compare_uint64, 0) < 0) { success = false; } printf("done\n"); if 
(!success) { printf("Hashmap init failed"); return 1; } printf("Generating test %u test keys...", TEST_NUM_KEYS); test_keys_generate(); printf("done\n"); printf("Running tests\n\n"); success &= test_run_all(&str_map, keys_str_random, tests, ARRAY_LEN(tests), "Hashmap w/randomized string keys"); success &= test_run_all(&str_map, keys_str_sequential, tests, ARRAY_LEN(tests), "Hashmap w/sequential string keys"); success &= test_run_all(&int_map, keys_int_random, tests, ARRAY_LEN(tests), "Hashmap w/randomized integer keys"); success &= test_run_all(&int_map, keys_int_sequential, tests, ARRAY_LEN(tests), "Hashmap w/sequential integer keys"); printf("\nTests finished\n"); hashmap_destroy(&str_map); hashmap_destroy(&int_map); if (!success) { printf("Tests FAILED\n"); exit(1); } return 0; } prometheus-client-mmap-0.10.0/vendor/c/hashmap/README.md0000644000004100000410000001062613606417577022663 0ustar www-datawww-data# hashmap Flexible hashmap implementation in C using open addressing and linear probing for collision resolution. ### Summary This project came into existence because there are a notable lack of flexible and easy to use data structures available in C. Sure, higher level languages have built-in libraries, but plenty of embedded projects or higher level libraries start with core C code. It was undesirable to add a bulky library like Glib as a dependency to my projects, or grapple with a restrictive license agreement. Searching for "C hashmap" yielded results with questionable algorithms and code quality, projects with difficult or inflexible interfaces, or projects with less desirable licenses. I decided it was time to create my own. ### Goals * **To scale gracefully to the full capacity of the numeric primitives in use.** E.g. on a 32-bit machine, you should be able to load a billion+ entries without hitting any bugs relating to integer overflows. 
Lookups on a hashtable with a billion entries should be performed in close to constant time, no different than lookups in a hashtable with 20 entries. Automatic rehashing occurs and maintains a load factor of 0.75 or less. * **To provide a clean and easy-to-use interface.** C data structures often struggle to strike a balance between flexibility and ease of use. To this end, I provided a generic interface using void pointers for keys and data, and macros to generate type-specific wrapper functions, if desired. * **To enable easy iteration and safe entry removal during iteration.** Applications often need these features, and the data structure should not hold them back. Both an iterator interface and a foreach function was provided to satisfy various use-cases. This hashmap also uses an open addressing scheme, which has superior iteration performance to a similar hashmap implemented using separate chaining (buckets with linked lists). This is because fewer instructions are needed per iteration, and array traversal has superior cache performance than linked list traversal. * **To use a very unrestrictive software license.** Using no license was an option, but I wanted to allow the code to be tracked, simply for my own edification. I chose the MIT license because it is the most common open source license in use, and it grants full rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell the code. Basically, take this code and do what you want with it. Just be nice and leave the license comment and my name at top of the file. Feel free to add your name if you are modifying and redistributing. 
### Code Example ```C #include #include #include /* Some sample data structure with a string key */ struct blob { char key[32]; size_t data_len; unsigned char data[1024]; }; /* Declare type-specific blob_hashmap_* functions with this handy macro */ HASHMAP_FUNCS_CREATE(blob, const char, struct blob) struct blob *blob_load(void) { struct blob *b; /* * Hypothetical function that allocates and loads blob structures * from somewhere. Returns NULL when there are no more blobs to load. */ return b; } /* Hashmap structure */ struct hashmap map; int main(int argc, char **argv) { struct blob *b; struct hashmap_iter *iter; /* Initialize with default string key functions and init size */ hashmap_init(&map, hashmap_hash_string, hashmap_compare_string, 0); /* Load some sample data into the map and discard duplicates */ while ((b = blob_load()) != NULL) { if (blob_hashmap_put(&map, b->key, b) != b) { printf("discarding blob with duplicate key: %s\n", b->key); free(b); } } /* Lookup a blob with key "AbCdEf" */ b = blob_hashmap_get(&map, "AbCdEf"); if (b) { printf("Found blob[%s]\n", b->key); } /* Iterate through all blobs and print each one */ for (iter = hashmap_iter(&map); iter; iter = hashmap_iter_next(&map, iter)) { printf("blob[%s]: data_len %zu bytes\n", blob_hashmap_iter_get_key(iter), blob_hashmap_iter_get_data(iter)->data_len); } /* Remove all blobs with no data */ iter = hashmap_iter(&map); while (iter) { b = blob_hashmap_iter_get_data(iter); if (b->data_len == 0) { iter = hashmap_iter_remove(&map, iter); free(b); } else { iter = hashmap_iter_next(&map, iter); } } /* Free all allocated resources associated with map and reset its state */ hashmap_destroy(&map); return 0; } ``` prometheus-client-mmap-0.10.0/vendor/c/hashmap/LICENSE0000644000004100000410000000205413606417577022405 0ustar www-datawww-dataMIT License Copyright (c) 2016 David Leeds Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files 
(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. prometheus-client-mmap-0.10.0/vendor/c/hashmap/_config.yml0000644000004100000410000000003113606417577023520 0ustar www-datawww-datatheme: jekyll-theme-slateprometheus-client-mmap-0.10.0/vendor/c/hashmap/src/0000755000004100000410000000000013606417577022166 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/vendor/c/hashmap/src/hashmap.c0000644000004100000410000004053613606417577023763 0ustar www-datawww-data/* * Copyright (c) 2016-2017 David Leeds * * Hashmap is free software; you can redistribute it and/or modify * it under the terms of the MIT license. See LICENSE for details. 
*/ #include #include #include #include #include #include "hashmap.h" #ifndef HASHMAP_NOASSERT #include #define HASHMAP_ASSERT(expr) assert(expr) #else #define HASHMAP_ASSERT(expr) #endif /* Table sizes must be powers of 2 */ #define HASHMAP_SIZE_MIN (1 << 5) /* 32 */ #define HASHMAP_SIZE_DEFAULT (1 << 8) /* 256 */ #define HASHMAP_SIZE_MOD(map, val) ((val) & ((map)->table_size - 1)) /* Limit for probing is 1/2 of table_size */ #define HASHMAP_PROBE_LEN(map) ((map)->table_size >> 1) /* Return the next linear probe index */ #define HASHMAP_PROBE_NEXT(map, index) HASHMAP_SIZE_MOD(map, (index) + 1) /* Check if index b is less than or equal to index a */ #define HASHMAP_INDEX_LE(map, a, b) \ ((a) == (b) || (((b) - (a)) & ((map)->table_size >> 1)) != 0) struct hashmap_entry { void *key; void *data; #ifdef HASHMAP_METRICS size_t num_collisions; #endif }; /* * Enforce a maximum 0.75 load factor. */ static inline size_t hashmap_table_min_size_calc(size_t num_entries) { return num_entries + (num_entries / 3); } /* * Calculate the optimal table size, given the specified max number * of elements. */ static size_t hashmap_table_size_calc(size_t num_entries) { size_t table_size; size_t min_size; table_size = hashmap_table_min_size_calc(num_entries); /* Table size is always a power of 2 */ min_size = HASHMAP_SIZE_MIN; while (min_size < table_size) { min_size <<= 1; } return min_size; } /* * Get a valid hash table index from a key. */ static inline size_t hashmap_calc_index(const struct hashmap *map, const void *key) { return HASHMAP_SIZE_MOD(map, map->hash(key)); } /* * Return the next populated entry, starting with the specified one. * Returns NULL if there are no more valid entries. */ static struct hashmap_entry *hashmap_entry_get_populated( const struct hashmap *map, struct hashmap_entry *entry) { for (; entry < &map->table[map->table_size]; ++entry) { if (entry->key) { return entry; } } return NULL; } /* * Find the hashmap entry with the specified key, or an empty slot. 
* Returns NULL if the entire table has been searched without finding a match.
 */
static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map,
	const void *key, bool find_empty)
{
	size_t i;
	size_t index;
	/* Probing is capped at half the table size */
	size_t probe_len = HASHMAP_PROBE_LEN(map);
	struct hashmap_entry *entry;

	index = hashmap_calc_index(map, key);

	/* Linear probing */
	for (i = 0; i < probe_len; ++i) {
		entry = &map->table[index];
		if (!entry->key) {
			/* Empty slot ends the probe chain */
			if (find_empty) {
#ifdef HASHMAP_METRICS
				entry->num_collisions = i;
#endif
				return entry;
			}
			return NULL;
		}
		if (map->key_compare(key, entry->key) == 0) {
			return entry;
		}
		index = HASHMAP_PROBE_NEXT(map, index);
	}
	return NULL;
}

/*
 * Removes the specified entry and processes the proceeding entries to reduce
 * the load factor and keep the chain continuous. This is a required
 * step for hash maps using linear probing.
 */
static void hashmap_entry_remove(struct hashmap *map,
	struct hashmap_entry *removed_entry)
{
	size_t i;
#ifdef HASHMAP_METRICS
	size_t removed_i = 0;
#endif
	size_t index;
	size_t entry_index;
	size_t removed_index = (removed_entry - map->table);
	struct hashmap_entry *entry;

	/* Free the key */
	if (map->key_free) {
		map->key_free(removed_entry->key);
	}
	--map->num_entries;

	/* Fill the free slot in the chain */
	index = HASHMAP_PROBE_NEXT(map, removed_index);
	for (i = 1; i < map->table_size; ++i) {
		entry = &map->table[index];
		if (!entry->key) {
			/* Reached end of chain */
			break;
		}
		entry_index = hashmap_calc_index(map, entry->key);
		/* Shift in entries with an index <= to the removed slot */
		if (HASHMAP_INDEX_LE(map, removed_index, entry_index)) {
#ifdef HASHMAP_METRICS
			entry->num_collisions -= (i - removed_i);
			removed_i = i;
#endif
			memcpy(removed_entry, entry, sizeof(*removed_entry));
			removed_index = index;
			removed_entry = entry;
		}
		index = HASHMAP_PROBE_NEXT(map, index);
	}
	/* Clear the last removed entry */
	memset(removed_entry, 0, sizeof(*removed_entry));
}

/*
 * Reallocates the hash table to the new size and rehashes all entries.
* new_size MUST be a power of 2.
 * Returns 0 on success and -errno on allocation or hash function failure.
 */
static int hashmap_rehash(struct hashmap *map, size_t new_size)
{
	size_t old_size;
	struct hashmap_entry *old_table;
	struct hashmap_entry *new_table;
	struct hashmap_entry *entry;
	struct hashmap_entry *new_entry;

	HASHMAP_ASSERT(new_size >= HASHMAP_SIZE_MIN);
	HASHMAP_ASSERT((new_size & (new_size - 1)) == 0);

	new_table = (struct hashmap_entry *)calloc(new_size,
	    sizeof(struct hashmap_entry));
	if (!new_table) {
		return -ENOMEM;
	}
	/* Backup old elements in case of rehash failure */
	old_size = map->table_size;
	old_table = map->table;
	/* Swap in the new table first: hashmap_entry_find uses map->table */
	map->table_size = new_size;
	map->table = new_table;
	/* Rehash */
	for (entry = old_table; entry < &old_table[old_size]; ++entry) {
		if (!entry->data) {
			/* Only copy entries with data */
			continue;
		}
		new_entry = hashmap_entry_find(map, entry->key, true);
		if (!new_entry) {
			/*
			 * The load factor is too high with the new table
			 * size, or a poor hash function was used.
			 */
			goto revert;
		}
		/* Shallow copy (intentionally omits num_collisions) */
		new_entry->key = entry->key;
		new_entry->data = entry->data;
	}
	free(old_table);
	return 0;
revert:
	/* Restore the original table untouched */
	map->table_size = old_size;
	map->table = old_table;
	free(new_table);
	return -EINVAL;
}

/*
 * Iterate through all entries and free all keys.
 */
static void hashmap_free_keys(struct hashmap *map)
{
	struct hashmap_iter *iter;

	/* Keys are only owned by the map when a key_free function is set */
	if (!map->key_free) {
		return;
	}
	for (iter = hashmap_iter(map); iter;
	    iter = hashmap_iter_next(map, iter)) {
		map->key_free((void *)hashmap_iter_get_key(iter));
	}
}

/*
 * Initialize an empty hashmap. A hash function and a key comparator are
 * required.
 *
 * hash_func should return an even distribution of numbers between 0
 * and SIZE_MAX varying on the key provided.
 *
 * key_compare_func should return 0 if the keys match, and non-zero otherwise.
 *
 * initial_size is optional, and may be set to the max number of entries
 * expected to be put in the hash table.
This is used as a hint to
 * pre-allocate the hash table to the minimum size needed to avoid
 * gratuitous rehashes. If initial_size is 0, a default size will be used.
 *
 * Returns 0 on success and -errno on failure.
 */
int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *),
	int (*key_compare_func)(const void *, const void *),
	size_t initial_size)
{
	HASHMAP_ASSERT(map != NULL);
	HASHMAP_ASSERT(hash_func != NULL);
	HASHMAP_ASSERT(key_compare_func != NULL);

	if (!initial_size) {
		initial_size = HASHMAP_SIZE_DEFAULT;
	} else {
		/* Convert init size to valid table size */
		initial_size = hashmap_table_size_calc(initial_size);
	}
	map->table_size_init = initial_size;
	map->table_size = initial_size;
	map->num_entries = 0;
	map->table = (struct hashmap_entry *)calloc(initial_size,
	    sizeof(struct hashmap_entry));
	if (!map->table) {
		return -ENOMEM;
	}
	map->hash = hash_func;
	map->key_compare = key_compare_func;
	/* Key ownership is opt-in via hashmap_set_key_alloc_funcs() */
	map->key_alloc = NULL;
	map->key_free = NULL;
	return 0;
}

/*
 * Free the hashmap and all associated memory.
 */
void hashmap_destroy(struct hashmap *map)
{
	if (!map) {
		return;
	}
	hashmap_free_keys(map);
	free(map->table);
	memset(map, 0, sizeof(*map));
}

/*
 * Enable internal memory management of hash keys.
 */
void hashmap_set_key_alloc_funcs(struct hashmap *map,
	void *(*key_alloc_func)(const void *),
	void (*key_free_func)(void *))
{
	HASHMAP_ASSERT(map != NULL);

	map->key_alloc = key_alloc_func;
	map->key_free = key_free_func;
}

/*
 * Add an entry to the hashmap. If an entry with a matching key already
 * exists and has a data pointer associated with it, the existing data
 * pointer is returned, instead of assigning the new value. Compare
 * the return value with the data passed in to determine if a new entry was
 * created. Returns NULL if memory allocation failed.
*/
void *hashmap_put(struct hashmap *map, const void *key, void *data)
{
	struct hashmap_entry *entry;

	HASHMAP_ASSERT(map != NULL);
	HASHMAP_ASSERT(key != NULL);

	/* Rehash with 2x capacity if load factor is approaching 0.75 */
	if (map->table_size <= hashmap_table_min_size_calc(map->num_entries)) {
		/* Failure is tolerated here; probing below may still find a slot */
		hashmap_rehash(map, map->table_size << 1);
	}
	entry = hashmap_entry_find(map, key, true);
	if (!entry) {
		/*
		 * Cannot find an empty slot. Either out of memory, or using
		 * a poor hash function. Attempt to rehash once to reduce
		 * chain length.
		 */
		if (hashmap_rehash(map, map->table_size << 1) < 0) {
			return NULL;
		}
		entry = hashmap_entry_find(map, key, true);
		if (!entry) {
			return NULL;
		}
	}
	if (!entry->key) {
		/* Allocate copy of key to simplify memory management */
		if (map->key_alloc) {
			entry->key = map->key_alloc(key);
			if (!entry->key) {
				return NULL;
			}
		} else {
			entry->key = (void *)key;
		}
		++map->num_entries;
	} else if (entry->data) {
		/* Do not overwrite existing data */
		return entry->data;
	}
	entry->data = data;
	return data;
}

/*
 * Return the data pointer, or NULL if no entry exists.
 */
void *hashmap_get(const struct hashmap *map, const void *key)
{
	struct hashmap_entry *entry;

	HASHMAP_ASSERT(map != NULL);
	HASHMAP_ASSERT(key != NULL);

	entry = hashmap_entry_find(map, key, false);
	if (!entry) {
		return NULL;
	}
	return entry->data;
}

/*
 * Remove an entry with the specified key from the map.
 * Returns the data pointer, or NULL, if no entry was found.
 */
void *hashmap_remove(struct hashmap *map, const void *key)
{
	struct hashmap_entry *entry;
	void *data;

	HASHMAP_ASSERT(map != NULL);
	HASHMAP_ASSERT(key != NULL);

	entry = hashmap_entry_find(map, key, false);
	if (!entry) {
		return NULL;
	}
	/* Save data before hashmap_entry_remove() clears the slot */
	data = entry->data;
	/* Clear the entry and make the chain contiguous */
	hashmap_entry_remove(map, entry);
	return data;
}

/*
 * Remove all entries.
*/
void hashmap_clear(struct hashmap *map)
{
	HASHMAP_ASSERT(map != NULL);

	hashmap_free_keys(map);
	map->num_entries = 0;
	memset(map->table, 0, sizeof(struct hashmap_entry) * map->table_size);
}

/*
 * Remove all entries and reset the hash table to its initial size.
 */
void hashmap_reset(struct hashmap *map)
{
	struct hashmap_entry *new_table;

	HASHMAP_ASSERT(map != NULL);

	hashmap_clear(map);
	if (map->table_size == map->table_size_init) {
		return;
	}
	/* Shrink back; on realloc failure keep the current (larger) table */
	new_table = (struct hashmap_entry *)realloc(map->table,
	    sizeof(struct hashmap_entry) * map->table_size_init);
	if (!new_table) {
		return;
	}
	map->table = new_table;
	map->table_size = map->table_size_init;
}

/*
 * Return the number of entries in the hash map.
 */
size_t hashmap_size(const struct hashmap *map)
{
	HASHMAP_ASSERT(map != NULL);

	return map->num_entries;
}

/*
 * Get a new hashmap iterator. The iterator is an opaque
 * pointer that may be used with hashmap_iter_*() functions.
 * Hashmap iterators are INVALID after a put or remove operation is performed.
 * hashmap_iter_remove() allows safe removal during iteration.
 */
struct hashmap_iter *hashmap_iter(const struct hashmap *map)
{
	HASHMAP_ASSERT(map != NULL);

	if (!map->num_entries) {
		return NULL;
	}
	return (struct hashmap_iter *)hashmap_entry_get_populated(map,
		map->table);
}

/*
 * Return an iterator to the next hashmap entry. Returns NULL if there are
 * no more entries.
 */
struct hashmap_iter *hashmap_iter_next(const struct hashmap *map,
	const struct hashmap_iter *iter)
{
	struct hashmap_entry *entry = (struct hashmap_entry *)iter;

	HASHMAP_ASSERT(map != NULL);

	if (!iter) {
		return NULL;
	}
	return (struct hashmap_iter *)hashmap_entry_get_populated(map,
		entry + 1);
}

/*
 * Remove the hashmap entry pointed to by this iterator and return an
 * iterator to the next entry. Returns NULL if there are no more entries.
*/ struct hashmap_iter *hashmap_iter_remove(struct hashmap *map, const struct hashmap_iter *iter) { struct hashmap_entry *entry = (struct hashmap_entry *)iter; HASHMAP_ASSERT(map != NULL); if (!iter) { return NULL; } if (!entry->key) { /* Iterator is invalid, so just return the next valid entry */ return hashmap_iter_next(map, iter); } hashmap_entry_remove(map, entry); return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry); } /* * Return the key of the entry pointed to by the iterator. */ const void *hashmap_iter_get_key(const struct hashmap_iter *iter) { if (!iter) { return NULL; } return (const void *)((struct hashmap_entry *)iter)->key; } /* * Return the data of the entry pointed to by the iterator. */ void *hashmap_iter_get_data(const struct hashmap_iter *iter) { if (!iter) { return NULL; } return ((struct hashmap_entry *)iter)->data; } /* * Set the data pointer of the entry pointed to by the iterator. */ void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data) { if (!iter) { return; } ((struct hashmap_entry *)iter)->data = data; } /* * Invoke func for each entry in the hashmap. Unlike the hashmap_iter_*() * interface, this function supports calls to hashmap_remove() during iteration. * However, it is an error to put or remove an entry other than the current one, * and doing so will immediately halt iteration and return an error. * Iteration is stopped if func returns non-zero. Returns func's return * value if it is < 0, otherwise, 0. 
*/ int hashmap_foreach(const struct hashmap *map, int (*func)(const void *, void *, void *), void *arg) { struct hashmap_entry *entry; size_t num_entries; const void *key; int rc; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(func != NULL); entry = map->table; for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } num_entries = map->num_entries; key = entry->key; rc = func(entry->key, entry->data, arg); if (rc < 0) { return rc; } if (rc > 0) { return 0; } /* Run this entry again if func() deleted it */ if (entry->key != key) { --entry; } else if (num_entries != map->num_entries) { /* Stop immediately if func put/removed another entry */ return -1; } } return 0; } /* * Default hash function for string keys. * This is an implementation of the well-documented Jenkins one-at-a-time * hash function. */ size_t hashmap_hash_string(const void *key) { const char *key_str = (const char *)key; size_t hash = 0; for (; *key_str; ++key_str) { hash += *key_str; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); return hash; } /* * Default key comparator function for string keys. */ int hashmap_compare_string(const void *a, const void *b) { return strcmp((const char *)a, (const char *)b); } /* * Default key allocation function for string keys. Use free() for the * key_free_func. */ void *hashmap_alloc_key_string(const void *key) { return (void *)strdup((const char *)key); } #ifdef HASHMAP_METRICS /* * Return the load factor. */ double hashmap_load_factor(const struct hashmap *map) { HASHMAP_ASSERT(map != NULL); if (!map->table_size) { return 0; } return (double)map->num_entries / map->table_size; } /* * Return the average number of collisions per entry. 
*/ double hashmap_collisions_mean(const struct hashmap *map) { struct hashmap_entry *entry; size_t total_collisions = 0; HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return 0; } for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } total_collisions += entry->num_collisions; } return (double)total_collisions / map->num_entries; } /* * Return the variance between entry collisions. The higher the variance, * the more likely the hash function is poor and is resulting in clustering. */ double hashmap_collisions_variance(const struct hashmap *map) { struct hashmap_entry *entry; double mean_collisions; double variance; double total_variance = 0; HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return 0; } mean_collisions = hashmap_collisions_mean(map); for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } variance = (double)entry->num_collisions - mean_collisions; total_variance += variance * variance; } return total_variance / map->num_entries; } #endif prometheus-client-mmap-0.10.0/vendor/c/hashmap/src/hashmap.h0000644000004100000410000002020613606417577023760 0ustar www-datawww-data/* * Copyright (c) 2016-2017 David Leeds * * Hashmap is free software; you can redistribute it and/or modify * it under the terms of the MIT license. See LICENSE for details. */ #ifndef __HASHMAP_H__ #define __HASHMAP_H__ #include /* * Define HASHMAP_METRICS to compile in performance analysis * functions for use in assessing hash function performance. */ /* #define HASHMAP_METRICS */ /* * Define HASHMAP_NOASSERT to compile out all assertions used internally. */ /* #define HASHMAP_NOASSERT */ /* * Macros to declare type-specific versions of hashmap_*() functions to * allow compile-time type checking and avoid the need for type casting. 
*/ #define HASHMAP_FUNCS_DECLARE(name, key_type, data_type) \ data_type *name##_hashmap_put(struct hashmap *map, key_type *key, \ data_type *data); \ data_type *name##_hashmap_get(const struct hashmap *map, \ key_type *key); \ data_type *name##_hashmap_remove(struct hashmap *map, \ key_type *key); \ key_type *name##_hashmap_iter_get_key( \ const struct hashmap_iter *iter); \ data_type *name##_hashmap_iter_get_data( \ const struct hashmap_iter *iter); \ void name##_hashmap_iter_set_data(const struct hashmap_iter *iter, \ data_type *data); \ int name##_hashmap_foreach(const struct hashmap *map, \ int (*func)(key_type *, data_type *, void *), void *arg); #define HASHMAP_FUNCS_CREATE(name, key_type, data_type) \ data_type *name##_hashmap_put(struct hashmap *map, key_type *key, \ data_type *data) \ { \ return (data_type *)hashmap_put(map, (const void *)key, \ (void *)data); \ } \ data_type *name##_hashmap_get(const struct hashmap *map, \ key_type *key) \ { \ return (data_type *)hashmap_get(map, (const void *)key); \ } \ data_type *name##_hashmap_remove(struct hashmap *map, \ key_type *key) \ { \ return (data_type *)hashmap_remove(map, (const void *)key); \ } \ key_type *name##_hashmap_iter_get_key( \ const struct hashmap_iter *iter) \ { \ return (key_type *)hashmap_iter_get_key(iter); \ } \ data_type *name##_hashmap_iter_get_data( \ const struct hashmap_iter *iter) \ { \ return (data_type *)hashmap_iter_get_data(iter); \ } \ void name##_hashmap_iter_set_data(const struct hashmap_iter *iter, \ data_type *data) \ { \ hashmap_iter_set_data(iter, (void *)data); \ } \ struct __##name##_hashmap_foreach_state { \ int (*func)(key_type *, data_type *, void *); \ void *arg; \ }; \ static inline int __##name##_hashmap_foreach_callback(const void *key, \ void *data, void *arg) \ { \ struct __##name##_hashmap_foreach_state *s = \ (struct __##name##_hashmap_foreach_state *)arg; \ return s->func((key_type *)key, (data_type *)data, s->arg); \ } \ int name##_hashmap_foreach(const struct 
hashmap *map, \ int (*func)(key_type *, data_type *, void *), void *arg) \ { \ struct __##name##_hashmap_foreach_state s = { func, arg }; \ return hashmap_foreach(map, \ __##name##_hashmap_foreach_callback, &s); \ } struct hashmap_iter; struct hashmap_entry; /* * The hashmap state structure. */ struct hashmap { size_t table_size_init; size_t table_size; size_t num_entries; struct hashmap_entry *table; size_t (*hash)(const void *); int (*key_compare)(const void *, const void *); void *(*key_alloc)(const void *); void (*key_free)(void *); }; /* * Initialize an empty hashmap. A hash function and a key comparator are * required. * * hash_func should return an even distribution of numbers between 0 * and SIZE_MAX varying on the key provided. * * key_compare_func should return 0 if the keys match, and non-zero otherwise. * * initial_size is optional, and may be set to the max number of entries * expected to be put in the hash table. This is used as a hint to * pre-allocate the hash table to the minimum size to avoid gratuitous rehashes. * If initial_size 0, a default size will be used. * * Returns 0 on success and -errno on failure. */ int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *), int (*key_compare_func)(const void *, const void *), size_t initial_size); /* * Free the hashmap and all associated memory. */ void hashmap_destroy(struct hashmap *map); /* * Enable internal memory allocation and management of hash keys. */ void hashmap_set_key_alloc_funcs(struct hashmap *map, void *(*key_alloc_func)(const void *), void (*key_free_func)(void *)); /* * Add an entry to the hashmap. If an entry with a matching key already * exists and has a data pointer associated with it, the existing data * pointer is returned, instead of assigning the new value. Compare * the return value with the data passed in to determine if a new entry was * created. Returns NULL if memory allocation failed. 
*/ void *hashmap_put(struct hashmap *map, const void *key, void *data); /* * Return the data pointer, or NULL if no entry exists. */ void *hashmap_get(const struct hashmap *map, const void *key); /* * Remove an entry with the specified key from the map. * Returns the data pointer, or NULL, if no entry was found. */ void *hashmap_remove(struct hashmap *map, const void *key); /* * Remove all entries. */ void hashmap_clear(struct hashmap *map); /* * Remove all entries and reset the hash table to its initial size. */ void hashmap_reset(struct hashmap *map); /* * Return the number of entries in the hash map. */ size_t hashmap_size(const struct hashmap *map); /* * Get a new hashmap iterator. The iterator is an opaque * pointer that may be used with hashmap_iter_*() functions. * Hashmap iterators are INVALID after a put or remove operation is performed. * hashmap_iter_remove() allows safe removal during iteration. */ struct hashmap_iter *hashmap_iter(const struct hashmap *map); /* * Return an iterator to the next hashmap entry. Returns NULL if there are * no more entries. */ struct hashmap_iter *hashmap_iter_next(const struct hashmap *map, const struct hashmap_iter *iter); /* * Remove the hashmap entry pointed to by this iterator and returns an * iterator to the next entry. Returns NULL if there are no more entries. */ struct hashmap_iter *hashmap_iter_remove(struct hashmap *map, const struct hashmap_iter *iter); /* * Return the key of the entry pointed to by the iterator. */ const void *hashmap_iter_get_key(const struct hashmap_iter *iter); /* * Return the data of the entry pointed to by the iterator. */ void *hashmap_iter_get_data(const struct hashmap_iter *iter); /* * Set the data pointer of the entry pointed to by the iterator. */ void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data); /* * Invoke func for each entry in the hashmap. Unlike the hashmap_iter_*() * interface, this function supports calls to hashmap_remove() during iteration. 
* However, it is an error to put or remove an entry other than the current one, * and doing so will immediately halt iteration and return an error. * Iteration is stopped if func returns non-zero. Returns func's return * value if it is < 0, otherwise, 0. */ int hashmap_foreach(const struct hashmap *map, int (*func)(const void *, void *, void *), void *arg); /* * Default hash function for string keys. * This is an implementation of the well-documented Jenkins one-at-a-time * hash function. */ size_t hashmap_hash_string(const void *key); /* * Default key comparator function for string keys. */ int hashmap_compare_string(const void *a, const void *b); /* * Default key allocation function for string keys. Use free() for the * key_free_func. */ void *hashmap_alloc_key_string(const void *key); #ifdef HASHMAP_METRICS /* * Return the load factor. */ double hashmap_load_factor(const struct hashmap *map); /* * Return the average number of collisions per entry. */ double hashmap_collisions_mean(const struct hashmap *map); /* * Return the variance between entry collisions. The higher the variance, * the more likely the hash function is poor and is resulting in clustering. 
*/ double hashmap_collisions_variance(const struct hashmap *map); #endif #endif /* __HASHMAP_H__ */ prometheus-client-mmap-0.10.0/ext/0000755000004100000410000000000013606417577017037 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/ext/fast_mmaped_file/0000755000004100000410000000000013606417577022316 5ustar www-datawww-dataprometheus-client-mmap-0.10.0/ext/fast_mmaped_file/value_access.c0000644000004100000410000001405513606417577025124 0ustar www-datawww-data#include #include #include #include #include #include #include "file_format.h" #include "mmap.h" #include "value_access.h" #include "utils.h" static void close_file(mm_ipc *i_mm) { close(i_mm->t->fd); i_mm->t->fd = -1; } static int open_and_extend_file(mm_ipc *i_mm, size_t len) { if (i_mm->t->fd < 0) { int fd; if ((fd = open(i_mm->t->path, i_mm->t->smode)) == -1) { return with_exception_errno(rb_eArgError, "%s: Can't open %s", __FILE__, i_mm->t->path); } i_mm->t->fd = fd; } if (lseek(i_mm->t->fd, len - 1, SEEK_SET) == -1) { close_file(i_mm); return with_exception_errno(rb_eIOError, "Can't lseek %zu", len - 1); } if (write(i_mm->t->fd, "\000", 1) != 1) { close_file(i_mm); return with_exception_errno(rb_eIOError, "Can't extend %s", i_mm->t->path); } i_mm->t->len = len; return SUCCESS; } static int perform_munmap(mm_ipc *i_mm) { if (i_mm->t->addr != NULL && munmap(i_mm->t->addr, i_mm->t->len)) { i_mm->t->addr = NULL; return with_exception_errno(rb_eArgError, "munmap failed"); } i_mm->t->addr = NULL; i_mm->t->len = 0; i_mm->t->real = 0; return SUCCESS; } static int perform_mmap(mm_ipc *i_mm, size_t len) { MMAP_RETTYPE addr = mmap(0, len, i_mm->t->pmode, i_mm->t->vscope, i_mm->t->fd, i_mm->t->offset); if (addr == MAP_FAILED) { return with_exception_errno(rb_eArgError, "mmap failed"); } i_mm->t->addr = addr; i_mm->t->len = len; i_mm->t->real = len; return SUCCESS; } static int expand(mm_ipc *i_mm, size_t len) { if (len < i_mm->t->len) { return with_exception(rb_eArgError, "Can't reduce the size of mmap"); } if 
(!perform_munmap(i_mm)) { return FAILURE; } if (!open_and_extend_file(i_mm, len)) { return FAILURE; } if (!perform_mmap(i_mm, len)) { close_file(i_mm); return FAILURE; } if ((i_mm->t->flag & MM_LOCK) && mlock(i_mm->t->addr, len) == -1) { return with_exception_errno(rb_eArgError, "mlock(%d)", errno); } return SUCCESS; } static void save_entry(mm_ipc *i_mm, size_t offset, VALUE key, VALUE value) { uint32_t key_length = (uint32_t)RSTRING_LEN(key); char *pos = (char *)i_mm->t->addr + offset; memcpy(pos, &key_length, sizeof(uint32_t)); pos += sizeof(uint32_t); memmove(pos, StringValuePtr(key), key_length); pos += key_length; memset(pos, ' ', padding_length(key_length)); // TODO: considder padding with /0 pos += padding_length(key_length); double val = NUM2DBL(value); memcpy(pos, &val, sizeof(double)); } static void save_value(mm_ipc *i_mm, VALUE _offset, VALUE value) { Check_Type(_offset, T_FIXNUM); size_t offset = NUM2UINT(_offset); if ((i_mm->t->real + sizeof(double)) <= offset) { rb_raise(rb_eIndexError, "offset %zu out of string", offset); } if (i_mm->t->flag & MM_FROZEN) { rb_error_frozen("mmap"); } char *pos = (char *)i_mm->t->addr + offset; double val = NUM2DBL(value); memcpy(pos, &val, sizeof(double)); } static VALUE load_value(mm_ipc *i_mm, VALUE _offset) { Check_Type(_offset, T_FIXNUM); size_t offset = NUM2UINT(_offset); if ((i_mm->t->real + sizeof(double)) <= offset) { rb_raise(rb_eIndexError, "offset %zu out of string", offset); } char *pos = (char *)i_mm->t->addr + offset; double value; memcpy(&value, pos, sizeof(double)); return DBL2NUM(value); } uint32_t load_used(mm_ipc *i_mm) { uint32_t used = *((uint32_t *)i_mm->t->addr); if (used == 0) { used = START_POSITION; } return used; } void save_used(mm_ipc *i_mm, uint32_t used) { *((uint32_t *)i_mm->t->addr) = used; } static VALUE initialize_entry(mm_ipc *i_mm, VALUE positions, VALUE key, VALUE value) { if (i_mm->t->flag & MM_FROZEN) { rb_error_frozen("mmap"); } if (RSTRING_LEN(key) > INT32_MAX) { 
rb_raise(rb_eArgError, "string length gt %d", INT32_MAX); } uint32_t key_length = (uint32_t)RSTRING_LEN(key); uint32_t value_offset = sizeof(uint32_t) + key_length + padding_length(key_length); uint32_t entry_length = value_offset + sizeof(double); uint32_t used = load_used(i_mm); while (i_mm->t->len < (used + entry_length)) { if (!expand(i_mm, i_mm->t->len * 2)) { raise_last_exception(); } } save_entry(i_mm, used, key, value); save_used(i_mm, used + entry_length); return rb_hash_aset(positions, key, INT2NUM(used + value_offset)); } VALUE method_fetch_entry(VALUE self, VALUE positions, VALUE key, VALUE default_value) { Check_Type(positions, T_HASH); Check_Type(key, T_STRING); mm_ipc *i_mm; GET_MMAP(self, i_mm, MM_MODIFY); VALUE position = rb_hash_lookup(positions, key); if (position != Qnil) { return load_value(i_mm, position); } position = initialize_entry(i_mm, positions, key, default_value); return load_value(i_mm, position); } VALUE method_upsert_entry(VALUE self, VALUE positions, VALUE key, VALUE value) { Check_Type(positions, T_HASH); Check_Type(key, T_STRING); mm_ipc *i_mm; GET_MMAP(self, i_mm, MM_MODIFY); VALUE position = rb_hash_lookup(positions, key); if (position != Qnil) { save_value(i_mm, position, value); return load_value(i_mm, position); } position = initialize_entry(i_mm, positions, key, value); return load_value(i_mm, position); } VALUE method_load_used(VALUE self) { mm_ipc *i_mm; GET_MMAP(self, i_mm, MM_MODIFY); return UINT2NUM(load_used(i_mm)); } VALUE method_save_used(VALUE self, VALUE value) { Check_Type(value, T_FIXNUM); mm_ipc *i_mm; GET_MMAP(self, i_mm, MM_MODIFY); if (i_mm->t->flag & MM_FROZEN) { rb_error_frozen("mmap"); } if (i_mm->t->len < INITIAL_SIZE) { if (!expand(i_mm, INITIAL_SIZE)) { raise_last_exception(); } } save_used(i_mm, NUM2UINT(value)); return value; } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/fast_mmaped_file.c0000644000004100000410000000646413606417577025753 0ustar www-datawww-data#include #include #include 
#include #include #include #include "globals.h" #include "utils.h" #include "value_access.h" #include "file_parsing.h" #include "file_reading.h" #include "mmap.h" #include "rendering.h" VALUE MMAPED_FILE = Qnil; ID sym_min; ID sym_max; ID sym_livesum; ID sym_gauge; ID sym_pid; ID sym_samples; VALUE prom_eParsingError; int aggregate_files(struct hashmap *map, VALUE list_of_files) { buffer_t reading_buffer; memset(&reading_buffer, 0, sizeof(buffer_t)); for (int i = 0; i < RARRAY_LEN(list_of_files); i++) { VALUE params = RARRAY_PTR(list_of_files)[i]; file_t file; if (!file_open_from_params(&file, params)) { buffer_dispose(&reading_buffer); return 0; } if (!read_from_file(&file, &reading_buffer)) { buffer_dispose(&reading_buffer); file_close(&file); return 0; } if (!process_buffer(&file, &reading_buffer, map)) { buffer_dispose(&reading_buffer); file_close(&file); return 0; } if (!file_close(&file)) { buffer_dispose(&reading_buffer); return 0; } } buffer_dispose(&reading_buffer); return 1; } VALUE method_to_metrics(VALUE UNUSED(self), VALUE file_list) { struct hashmap map; hashmap_setup(&map); if (!aggregate_files(&map, file_list)) { // all entries in map are now copies that need to be disposed hashmap_destroy(&map); raise_last_exception(); return Qnil; } entry_t **sorted_entries; if (!sort_map_entries(&map, &sorted_entries)) { hashmap_destroy(&map); raise_last_exception(); return Qnil; } VALUE rv = rb_str_new("", 0); if (!entries_to_string(rv, sorted_entries, hashmap_size(&map))) { free(sorted_entries); hashmap_destroy(&map); raise_last_exception(); return Qnil; } RB_GC_GUARD(file_list); // ensure file list is not GCed before this point free(sorted_entries); hashmap_destroy(&map); return rv; } void Init_fast_mmaped_file() { sym_gauge = rb_intern("gauge"); sym_min = rb_intern("min"); sym_max = rb_intern("max"); sym_livesum = rb_intern("livesum"); sym_pid = rb_intern("pid"); sym_samples = rb_intern("samples"); prom_eParsingError = 
rb_define_class("PrometheusParsingError", rb_eRuntimeError); MMAPED_FILE = rb_define_class("FastMmapedFile", rb_cObject); rb_define_const(MMAPED_FILE, "MAP_SHARED", INT2FIX(MAP_SHARED)); rb_define_singleton_method(MMAPED_FILE, "to_metrics", method_to_metrics, 1); rb_define_alloc_func(MMAPED_FILE, mm_s_alloc); rb_define_singleton_method(MMAPED_FILE, "new", mm_s_new, -1); rb_define_method(MMAPED_FILE, "initialize", mm_init, 1); rb_define_method(MMAPED_FILE, "slice", mm_aref_m, -1); rb_define_method(MMAPED_FILE, "sync", mm_msync, -1); rb_define_method(MMAPED_FILE, "munmap", mm_unmap, 0); rb_define_method(MMAPED_FILE, "used", method_load_used, 0); rb_define_method(MMAPED_FILE, "used=", method_save_used, 1); rb_define_method(MMAPED_FILE, "fetch_entry", method_fetch_entry, 3); rb_define_method(MMAPED_FILE, "upsert_entry", method_upsert_entry, 3); } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/globals.h0000644000004100000410000000033413606417577024112 0ustar www-datawww-data#ifndef GLOBALS_H #define GLOBALS_H #include extern ID sym_min; extern ID sym_max; extern ID sym_livesum; extern ID sym_gauge; extern ID sym_pid; extern ID sym_samples; extern VALUE prom_eParsingError; #endifprometheus-client-mmap-0.10.0/ext/fast_mmaped_file/file_reading.c0000644000004100000410000000541713606417577025101 0ustar www-datawww-data#include #include #include #include "file_reading.h" #include "utils.h" static int file_open(file_t *source, const char *filepath) { source->file = fopen(filepath, "r"); size_t filepath_len = strlen(filepath) + sizeof(char); source->path = malloc(filepath_len); memcpy(source->path, filepath, filepath_len); if (source->file == NULL) { save_exception(rb_eArgError, "Can't open %s, errno: %d", filepath, errno); return 0; } struct stat sb; if (fstat(fileno(source->file), &sb) != 0) { fclose(source->file); save_exception(rb_eIOError, "Can't stat file, errno: %d", errno); return 0; } source->length = sb.st_size; // go to start if (fseek(source->file, 0L, SEEK_SET) 
!= 0) { fclose(source->file); save_exception(rb_eIOError, "Can't fseek %zu, errno: %d", 0, errno); return 0; } return 1; } int file_close(file_t *source) { free(source->path); if (fclose(source->file) != 0) { save_exception(rb_eIOError, "Can't fclose file, errno: %d", 0, errno); return 0; } source->file = 0; return 1; } int file_open_from_params(file_t *source, VALUE params) { if (RARRAY_LEN(params) != 4) { save_exception(rb_eArgError, "wrong number of arguments %lu instead of 4", RARRAY_LEN(params)); return 0; } VALUE filepath = rb_ary_entry(params, 0); source->multiprocess_mode = rb_sym2id(rb_ary_entry(params, 1)); source->type = rb_sym2id(rb_ary_entry(params, 2)); source->pid = rb_ary_entry(params, 3); return file_open(source, StringValueCStr(filepath)); } int read_from_file(const file_t *source, buffer_t *data) { data->size = 0; if (data->buffer == NULL) { data->buffer = malloc(source->length); if (data->buffer == NULL) { save_exception(rb_eIOError, "Can't malloc %zu, errno: %d", source->length, errno); return 0; } data->capacity = source->length; } else if (data->capacity < source->length) { data->buffer = realloc(data->buffer, source->length); if (data->buffer == NULL) { save_exception(rb_eIOError, "Can't realloc %zu, errno: %d", source->length, errno); return 0; } data->capacity = source->length; } data->size = fread(data->buffer, sizeof(char), source->length, source->file); if (data->size != source->length) { save_exception(rb_eIOError, "Couldn't read whole file, read %zu, instead of %zu", data->size, source->length); return 0; } return 1; } void buffer_dispose(buffer_t *buffer) { if (buffer->buffer) { free(buffer->buffer); } buffer->buffer = NULL; buffer->size = 0; buffer->capacity = 0; } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/rendering.h0000644000004100000410000000026213606417577024444 0ustar www-datawww-data#ifndef RENDERING_H #define RENDERING_H #include #include int entries_to_string(VALUE string, entry_t **sorted_entries, size_t 
entries_count); #endif prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/mmap.c0000644000004100000410000002215313606417577023417 0ustar www-datawww-data#include #include #include #include #include #include "file_format.h" #include "mmap.h" #include "utils.h" #if 0 #include #define DEBUGF(format, ...) printf("%d: " format "\n", __LINE__, __VA_ARGS__) #else #define DEBUGF(format, ...) #endif typedef struct { VALUE obj, *argv; ID id; int flag, argc; } mm_bang; static VALUE mm_protect_bang(VALUE *t) { return rb_funcall2(t[0], (ID)t[1], (int)t[2], (VALUE *)t[3]); } static VALUE mm_recycle(VALUE str) { rb_gc_force_recycle(str); return str; } static VALUE mm_vunlock(VALUE obj) { mm_ipc *i_mm; GET_MMAP(obj, i_mm, 0); return Qnil; } static VALUE mm_str(VALUE obj, int modify) { mm_ipc *i_mm; VALUE ret = Qnil; GET_MMAP(obj, i_mm, modify & ~MM_ORIGIN); if (modify & MM_MODIFY) { if (i_mm->t->flag & MM_FROZEN) rb_error_frozen("mmap"); if (!OBJ_TAINTED(ret) && rb_safe_level() >= 4) rb_raise(rb_eSecurityError, "Insecure: can't modify mmap"); } ret = rb_obj_alloc(rb_cString); if (rb_obj_tainted(obj)) { OBJ_TAINT(ret); } RSTRING(ret)->as.heap.ptr = i_mm->t->addr; RSTRING(ret)->as.heap.aux.capa = i_mm->t->len; RSTRING(ret)->as.heap.len = i_mm->t->real; DEBUGF("RString capa: %d, len: %d", RSTRING(ret)->as.heap.aux.capa, RSTRING(ret)->as.heap.len); if (modify & MM_ORIGIN) { #if HAVE_RB_DEFINE_ALLOC_FUNC RSTRING(ret)->as.heap.aux.shared = obj; FL_SET(ret, RSTRING_NOEMBED); FL_SET(ret, FL_USER18); #else RSTRING(ret)->orig = ret; #endif } if (i_mm->t->flag & MM_FROZEN) { ret = rb_obj_freeze(ret); } return ret; } static VALUE mm_i_bang(bang_st) mm_bang *bang_st; { VALUE str, res; mm_ipc *i_mm; str = mm_str(bang_st->obj, bang_st->flag); if (bang_st->flag & MM_PROTECT) { VALUE tmp[4]; tmp[0] = str; tmp[1] = (VALUE)bang_st->id; tmp[2] = (VALUE)bang_st->argc; tmp[3] = (VALUE)bang_st->argv; res = rb_ensure(mm_protect_bang, (VALUE)tmp, mm_recycle, str); } else { res = rb_funcall2(str, 
bang_st->id, bang_st->argc, bang_st->argv); RB_GC_GUARD(res); } if (res != Qnil) { GET_MMAP(bang_st->obj, i_mm, 0); i_mm->t->real = RSTRING_LEN(str); } return res; } static VALUE mm_bang_i(VALUE obj, int flag, ID id, int argc, VALUE *argv) { VALUE res; mm_ipc *i_mm; mm_bang bang_st; GET_MMAP(obj, i_mm, 0); if ((flag & MM_CHANGE) && (i_mm->t->flag & MM_FIXED)) { rb_raise(rb_eTypeError, "try to change the size of a fixed map"); } bang_st.obj = obj; bang_st.flag = flag; bang_st.id = id; bang_st.argc = argc; bang_st.argv = argv; if (i_mm->t->flag & MM_IPC) { res = rb_ensure(mm_i_bang, (VALUE)&bang_st, mm_vunlock, obj); } else { res = mm_i_bang(&bang_st); } if (res == Qnil) return res; return (flag & MM_ORIGIN) ? res : obj; } static void mm_free(mm_ipc *i_mm) { if (i_mm->t->path) { if (munmap(i_mm->t->addr, i_mm->t->len) != 0) { if (i_mm->t->path != (char *)-1 && i_mm->t->path != NULL) { free(i_mm->t->path); } free(i_mm); rb_raise(rb_eRuntimeError, "munmap failed at %s:%d with errno: %d", __FILE__, __LINE__, errno); } if (i_mm->t->path != (char *)-1) { if (i_mm->t->real < i_mm->t->len && i_mm->t->vscope != MAP_PRIVATE && truncate(i_mm->t->path, i_mm->t->real) == -1) { free(i_mm->t->path); free(i_mm); rb_raise(rb_eTypeError, "truncate"); } free(i_mm->t->path); } } free(i_mm); } /* * call-seq: * new(file) * * create a new Mmap object * * * file * * * Creates a mapping that's shared with all other processes * mapping the same areas of the file. 
* */ VALUE mm_s_new(int argc, VALUE *argv, VALUE obj) { VALUE res = rb_funcall2(obj, rb_intern("allocate"), 0, 0); rb_obj_call_init(res, argc, argv); return res; } VALUE mm_s_alloc(VALUE obj) { VALUE res; mm_ipc *i_mm; res = Data_Make_Struct(obj, mm_ipc, 0, mm_free, i_mm); i_mm->t = ALLOC_N(mm_mmap, 1); MEMZERO(i_mm->t, mm_mmap, 1); i_mm->t->fd = -1; return res; } size_t next_page_boundary(size_t value) { size_t page_size = sysconf(_SC_PAGESIZE); while (page_size < value) { page_size *= 2; } return page_size; } /* Reference implementations: * mozilla: https://hg.mozilla.org/mozilla-central/file/3d846420a907/xpcom/glue/FileUtils.cpp#l71 * glibc: https://github.com/lattera/glibc/blob/master/sysdeps/posix/posix_fallocate.c */ int reserve_mmap_file_bytes(int fd, size_t size) { #if __linux__ /* From https://stackoverflow.com/a/22820221: The difference with * ftruncate(2) is that (on file systems supporting it, e.g. Ext4) * disk space is indeed reserved by posix_fallocate but ftruncate * extends the file by adding holes (and without reserving disk * space). */ return posix_fallocate(fd, 0, size); #else /* We simplify the reference implemnetations since we generally * don't need to reserve more than a page size. 
*/ return ftruncate(fd, size); #endif } VALUE mm_init(VALUE obj, VALUE fname) { struct stat st; int fd, smode = 0, pmode = 0, vscope, perm, init; MMAP_RETTYPE addr; mm_ipc *i_mm; char *path; size_t size = 0; off_t offset; vscope = 0; path = 0; fd = -1; fname = rb_str_to_str(fname); SafeStringValue(fname); path = StringValuePtr(fname); { if (rb_safe_level() > 0 && OBJ_TAINTED(fname)) { rb_raise(rb_eSecurityError, "Insecure operation"); } rb_secure(1); } vscope = MAP_SHARED; size = 0; perm = 0666; smode = O_RDWR; pmode = PROT_READ | PROT_WRITE; if ((fd = open(path, smode, perm)) == -1) { rb_raise(rb_eArgError, "Can't open %s", path); } if (fstat(fd, &st) == -1) { close(fd); rb_raise(rb_eArgError, "Can't stat %s", path); } size = st.st_size; Data_Get_Struct(obj, mm_ipc, i_mm); offset = 0; init = 0; if (size == 0) { init = 1; size = INITIAL_SIZE; } /* We need to ensure the underlying file descriptor is at least a page size. * Otherwise, we could get a SIGBUS error if mmap() attempts to read or write * past the file. */ size_t reserve_size = next_page_boundary(size); if (reserve_mmap_file_bytes(fd, reserve_size) != 0) { close(fd); rb_raise(rb_eIOError, "Can't reserve %zu bytes for memory-mapped file", reserve_size); } addr = mmap(0, size, pmode, vscope, fd, offset); if (addr == MAP_FAILED || !addr) { close(fd); rb_raise(rb_eArgError, "mmap failed (%d)", errno); } i_mm->t->fd = fd; i_mm->t->addr = addr; i_mm->t->len = size; if (!init) { i_mm->t->real = size; } i_mm->t->pmode = pmode; i_mm->t->vscope = vscope; i_mm->t->smode = smode & ~O_TRUNC; i_mm->t->path = (path) ? 
ruby_strdup(path) : (char *)-1; if (smode == O_WRONLY) { i_mm->t->flag |= MM_FIXED; } OBJ_TAINT(obj); return obj; } /* * Document-method: [] * Document-method: slice * * call-seq: [](args) * * Element reference - with the following syntax: * * self[nth] * * retrieve the nth character * * self[start..last] * * return a substring from start to last * * self[start, length] * * return a substring of lenght characters from start */ VALUE mm_aref_m(int argc, VALUE *argv, VALUE obj) { return mm_bang_i(obj, MM_ORIGIN, rb_intern("[]"), argc, argv); } /* * Document-method: msync * Document-method: sync * Document-method: flush * * call-seq: msync * * flush the file */ VALUE mm_msync(int argc, VALUE *argv, VALUE obj) { mm_ipc *i_mm; GET_MMAP(obj, i_mm, MM_MODIFY); VALUE oflag; int ret; int flag = MS_SYNC; if (argc) { rb_scan_args(argc, argv, "01", &oflag); flag = NUM2INT(oflag); } if ((ret = msync(i_mm->t->addr, i_mm->t->len, flag)) != 0) { rb_raise(rb_eArgError, "msync(%d)", ret); } return obj; } /* * Document-method: munmap * Document-method: unmap * * call-seq: munmap * * terminate the association */ VALUE mm_unmap(VALUE obj) { mm_ipc *i_mm; GET_MMAP(obj, i_mm, 0); if (i_mm->t->path) { if (munmap(i_mm->t->addr, i_mm->t->len) != 0) { if (i_mm->t->path != (char *)-1 && i_mm->t->path != NULL) { free(i_mm->t->path); i_mm->t->path = NULL; } rb_raise(rb_eRuntimeError, "munmap failed at %s:%d with errno: %d", __FILE__, __LINE__, errno); } if (i_mm->t->path != (char *)-1) { if (i_mm->t->real < i_mm->t->len && i_mm->t->vscope != MAP_PRIVATE && truncate(i_mm->t->path, i_mm->t->real) == -1) { rb_raise(rb_eTypeError, "truncate"); } free(i_mm->t->path); } i_mm->t->path = NULL; } return Qnil; } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/utils.c0000644000004100000410000000315513606417577023626 0ustar www-datawww-data#include #include #include "utils.h" static void rb_save_exception(VALUE exception, VALUE message) { VALUE current_thread = rb_thread_current(); 
rb_thread_local_aset(current_thread, rb_intern("prometheus_last_exception"), exception); rb_thread_local_aset(current_thread, rb_intern("prometheus_last_exception_message"), message); } /* @deprecated - use with_exception ignoring return value */ void save_exception(VALUE exception, const char *fmt, ...) { va_list args; va_start(args, fmt); VALUE message = rb_vsprintf(fmt, args); rb_save_exception(exception, message); va_end(args); } int with_exception(VALUE exception, const char *fmt, ...) { va_list args; va_start(args, fmt); VALUE message = rb_vsprintf(fmt, args); rb_save_exception(exception, message); va_end(args); return FAILURE; } int with_exception_errno(VALUE exception, const char *fmt, ...) { va_list args; va_start(args, fmt); VALUE message = rb_vsprintf(fmt, args); rb_str_catf(message, " (%s)", strerror(errno)); rb_save_exception(exception, message); va_end(args); return FAILURE; } NORETURN(void raise_last_exception()) { VALUE current_thread = rb_thread_current(); VALUE exception = rb_thread_local_aref(current_thread, rb_intern("prometheus_last_exception")); VALUE message = rb_thread_local_aref(current_thread, rb_intern("prometheus_last_exception_message")); if (exception != Qnil) { rb_raise(exception, "%s", StringValueCStr(message)); } else { rb_raise(rb_eRuntimeError, "no exception found in thread local"); } } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/value_access.h0000644000004100000410000000055013606417577025124 0ustar www-datawww-data#ifndef VALUE_ACCESS_H #define VALUE_ACCESS_H VALUE method_load_used(VALUE self); VALUE method_save_used(VALUE self, VALUE value); VALUE method_get_double(VALUE self, VALUE index); VALUE method_fetch_entry(VALUE self, VALUE positions, VALUE key, VALUE default_value); VALUE method_upsert_entry(VALUE self, VALUE positions, VALUE key, VALUE value); #endifprometheus-client-mmap-0.10.0/ext/fast_mmaped_file/utils.h0000644000004100000410000000074713606417577023637 0ustar www-datawww-data#ifndef UNUSED_H #define UNUSED_H 
#ifdef UNUSED #elif defined(__GNUC__) #define UNUSED(x) UNUSED_##x __attribute__((unused)) #elif defined(__LCLINT__) #define UNUSED(x) /*@unused@*/ x #else #define UNUSED(x) x #endif #define SUCCESS 1 #define FAILURE 0 NORETURN(void raise_last_exception()); void save_exception(VALUE exception, const char *fmt, ...); int with_exception(VALUE exception, const char *fmt, ...); int with_exception_errno(VALUE exception, const char *fmt, ...); #endif prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/hashmap.c0000644000004100000410000004053613606417577024113 0ustar www-datawww-data/* * Copyright (c) 2016-2017 David Leeds * * Hashmap is free software; you can redistribute it and/or modify * it under the terms of the MIT license. See LICENSE for details. */ #include #include #include #include #include #include "hashmap.h" #ifndef HASHMAP_NOASSERT #include #define HASHMAP_ASSERT(expr) assert(expr) #else #define HASHMAP_ASSERT(expr) #endif /* Table sizes must be powers of 2 */ #define HASHMAP_SIZE_MIN (1 << 5) /* 32 */ #define HASHMAP_SIZE_DEFAULT (1 << 8) /* 256 */ #define HASHMAP_SIZE_MOD(map, val) ((val) & ((map)->table_size - 1)) /* Limit for probing is 1/2 of table_size */ #define HASHMAP_PROBE_LEN(map) ((map)->table_size >> 1) /* Return the next linear probe index */ #define HASHMAP_PROBE_NEXT(map, index) HASHMAP_SIZE_MOD(map, (index) + 1) /* Check if index b is less than or equal to index a */ #define HASHMAP_INDEX_LE(map, a, b) \ ((a) == (b) || (((b) - (a)) & ((map)->table_size >> 1)) != 0) struct hashmap_entry { void *key; void *data; #ifdef HASHMAP_METRICS size_t num_collisions; #endif }; /* * Enforce a maximum 0.75 load factor. */ static inline size_t hashmap_table_min_size_calc(size_t num_entries) { return num_entries + (num_entries / 3); } /* * Calculate the optimal table size, given the specified max number * of elements. 
*/ static size_t hashmap_table_size_calc(size_t num_entries) { size_t table_size; size_t min_size; table_size = hashmap_table_min_size_calc(num_entries); /* Table size is always a power of 2 */ min_size = HASHMAP_SIZE_MIN; while (min_size < table_size) { min_size <<= 1; } return min_size; } /* * Get a valid hash table index from a key. */ static inline size_t hashmap_calc_index(const struct hashmap *map, const void *key) { return HASHMAP_SIZE_MOD(map, map->hash(key)); } /* * Return the next populated entry, starting with the specified one. * Returns NULL if there are no more valid entries. */ static struct hashmap_entry *hashmap_entry_get_populated( const struct hashmap *map, struct hashmap_entry *entry) { for (; entry < &map->table[map->table_size]; ++entry) { if (entry->key) { return entry; } } return NULL; } /* * Find the hashmap entry with the specified key, or an empty slot. * Returns NULL if the entire table has been searched without finding a match. */ static struct hashmap_entry *hashmap_entry_find(const struct hashmap *map, const void *key, bool find_empty) { size_t i; size_t index; size_t probe_len = HASHMAP_PROBE_LEN(map); struct hashmap_entry *entry; index = hashmap_calc_index(map, key); /* Linear probing */ for (i = 0; i < probe_len; ++i) { entry = &map->table[index]; if (!entry->key) { if (find_empty) { #ifdef HASHMAP_METRICS entry->num_collisions = i; #endif return entry; } return NULL; } if (map->key_compare(key, entry->key) == 0) { return entry; } index = HASHMAP_PROBE_NEXT(map, index); } return NULL; } /* * Removes the specified entry and processes the proceeding entries to reduce * the load factor and keep the chain continuous. This is a required * step for hash maps using linear probing. 
*/ static void hashmap_entry_remove(struct hashmap *map, struct hashmap_entry *removed_entry) { size_t i; #ifdef HASHMAP_METRICS size_t removed_i = 0; #endif size_t index; size_t entry_index; size_t removed_index = (removed_entry - map->table); struct hashmap_entry *entry; /* Free the key */ if (map->key_free) { map->key_free(removed_entry->key); } --map->num_entries; /* Fill the free slot in the chain */ index = HASHMAP_PROBE_NEXT(map, removed_index); for (i = 1; i < map->table_size; ++i) { entry = &map->table[index]; if (!entry->key) { /* Reached end of chain */ break; } entry_index = hashmap_calc_index(map, entry->key); /* Shift in entries with an index <= to the removed slot */ if (HASHMAP_INDEX_LE(map, removed_index, entry_index)) { #ifdef HASHMAP_METRICS entry->num_collisions -= (i - removed_i); removed_i = i; #endif memcpy(removed_entry, entry, sizeof(*removed_entry)); removed_index = index; removed_entry = entry; } index = HASHMAP_PROBE_NEXT(map, index); } /* Clear the last removed entry */ memset(removed_entry, 0, sizeof(*removed_entry)); } /* * Reallocates the hash table to the new size and rehashes all entries. * new_size MUST be a power of 2. * Returns 0 on success and -errno on allocation or hash function failure. 
*/ static int hashmap_rehash(struct hashmap *map, size_t new_size) { size_t old_size; struct hashmap_entry *old_table; struct hashmap_entry *new_table; struct hashmap_entry *entry; struct hashmap_entry *new_entry; HASHMAP_ASSERT(new_size >= HASHMAP_SIZE_MIN); HASHMAP_ASSERT((new_size & (new_size - 1)) == 0); new_table = (struct hashmap_entry *)calloc(new_size, sizeof(struct hashmap_entry)); if (!new_table) { return -ENOMEM; } /* Backup old elements in case of rehash failure */ old_size = map->table_size; old_table = map->table; map->table_size = new_size; map->table = new_table; /* Rehash */ for (entry = old_table; entry < &old_table[old_size]; ++entry) { if (!entry->data) { /* Only copy entries with data */ continue; } new_entry = hashmap_entry_find(map, entry->key, true); if (!new_entry) { /* * The load factor is too high with the new table * size, or a poor hash function was used. */ goto revert; } /* Shallow copy (intentionally omits num_collisions) */ new_entry->key = entry->key; new_entry->data = entry->data; } free(old_table); return 0; revert: map->table_size = old_size; map->table = old_table; free(new_table); return -EINVAL; } /* * Iterate through all entries and free all keys. */ static void hashmap_free_keys(struct hashmap *map) { struct hashmap_iter *iter; if (!map->key_free) { return; } for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) { map->key_free((void *)hashmap_iter_get_key(iter)); } } /* * Initialize an empty hashmap. A hash function and a key comparator are * required. * * hash_func should return an even distribution of numbers between 0 * and SIZE_MAX varying on the key provided. * * key_compare_func should return 0 if the keys match, and non-zero otherwise. * * initial_size is optional, and may be set to the max number of entries * expected to be put in the hash table. This is used as a hint to * pre-allocate the hash table to the minimum size needed to avoid * gratuitous rehashes. 
If initial_size 0, a default size will be used. * * Returns 0 on success and -errno on failure. */ int hashmap_init(struct hashmap *map, size_t (*hash_func)(const void *), int (*key_compare_func)(const void *, const void *), size_t initial_size) { HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(hash_func != NULL); HASHMAP_ASSERT(key_compare_func != NULL); if (!initial_size) { initial_size = HASHMAP_SIZE_DEFAULT; } else { /* Convert init size to valid table size */ initial_size = hashmap_table_size_calc(initial_size); } map->table_size_init = initial_size; map->table_size = initial_size; map->num_entries = 0; map->table = (struct hashmap_entry *)calloc(initial_size, sizeof(struct hashmap_entry)); if (!map->table) { return -ENOMEM; } map->hash = hash_func; map->key_compare = key_compare_func; map->key_alloc = NULL; map->key_free = NULL; return 0; } /* * Free the hashmap and all associated memory. */ void hashmap_destroy(struct hashmap *map) { if (!map) { return; } hashmap_free_keys(map); free(map->table); memset(map, 0, sizeof(*map)); } /* * Enable internal memory management of hash keys. */ void hashmap_set_key_alloc_funcs(struct hashmap *map, void *(*key_alloc_func)(const void *), void (*key_free_func)(void *)) { HASHMAP_ASSERT(map != NULL); map->key_alloc = key_alloc_func; map->key_free = key_free_func; } /* * Add an entry to the hashmap. If an entry with a matching key already * exists and has a data pointer associated with it, the existing data * pointer is returned, instead of assigning the new value. Compare * the return value with the data passed in to determine if a new entry was * created. Returns NULL if memory allocation failed. 
*/ void *hashmap_put(struct hashmap *map, const void *key, void *data) { struct hashmap_entry *entry; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(key != NULL); /* Rehash with 2x capacity if load factor is approaching 0.75 */ if (map->table_size <= hashmap_table_min_size_calc(map->num_entries)) { hashmap_rehash(map, map->table_size << 1); } entry = hashmap_entry_find(map, key, true); if (!entry) { /* * Cannot find an empty slot. Either out of memory, or using * a poor hash function. Attempt to rehash once to reduce * chain length. */ if (hashmap_rehash(map, map->table_size << 1) < 0) { return NULL; } entry = hashmap_entry_find(map, key, true); if (!entry) { return NULL; } } if (!entry->key) { /* Allocate copy of key to simplify memory management */ if (map->key_alloc) { entry->key = map->key_alloc(key); if (!entry->key) { return NULL; } } else { entry->key = (void *)key; } ++map->num_entries; } else if (entry->data) { /* Do not overwrite existing data */ return entry->data; } entry->data = data; return data; } /* * Return the data pointer, or NULL if no entry exists. */ void *hashmap_get(const struct hashmap *map, const void *key) { struct hashmap_entry *entry; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(key != NULL); entry = hashmap_entry_find(map, key, false); if (!entry) { return NULL; } return entry->data; } /* * Remove an entry with the specified key from the map. * Returns the data pointer, or NULL, if no entry was found. */ void *hashmap_remove(struct hashmap *map, const void *key) { struct hashmap_entry *entry; void *data; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(key != NULL); entry = hashmap_entry_find(map, key, false); if (!entry) { return NULL; } data = entry->data; /* Clear the entry and make the chain contiguous */ hashmap_entry_remove(map, entry); return data; } /* * Remove all entries. 
*/ void hashmap_clear(struct hashmap *map) { HASHMAP_ASSERT(map != NULL); hashmap_free_keys(map); map->num_entries = 0; memset(map->table, 0, sizeof(struct hashmap_entry) * map->table_size); } /* * Remove all entries and reset the hash table to its initial size. */ void hashmap_reset(struct hashmap *map) { struct hashmap_entry *new_table; HASHMAP_ASSERT(map != NULL); hashmap_clear(map); if (map->table_size == map->table_size_init) { return; } new_table = (struct hashmap_entry *)realloc(map->table, sizeof(struct hashmap_entry) * map->table_size_init); if (!new_table) { return; } map->table = new_table; map->table_size = map->table_size_init; } /* * Return the number of entries in the hash map. */ size_t hashmap_size(const struct hashmap *map) { HASHMAP_ASSERT(map != NULL); return map->num_entries; } /* * Get a new hashmap iterator. The iterator is an opaque * pointer that may be used with hashmap_iter_*() functions. * Hashmap iterators are INVALID after a put or remove operation is performed. * hashmap_iter_remove() allows safe removal during iteration. */ struct hashmap_iter *hashmap_iter(const struct hashmap *map) { HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return NULL; } return (struct hashmap_iter *)hashmap_entry_get_populated(map, map->table); } /* * Return an iterator to the next hashmap entry. Returns NULL if there are * no more entries. */ struct hashmap_iter *hashmap_iter_next(const struct hashmap *map, const struct hashmap_iter *iter) { struct hashmap_entry *entry = (struct hashmap_entry *)iter; HASHMAP_ASSERT(map != NULL); if (!iter) { return NULL; } return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry + 1); } /* * Remove the hashmap entry pointed to by this iterator and return an * iterator to the next entry. Returns NULL if there are no more entries. 
*/ struct hashmap_iter *hashmap_iter_remove(struct hashmap *map, const struct hashmap_iter *iter) { struct hashmap_entry *entry = (struct hashmap_entry *)iter; HASHMAP_ASSERT(map != NULL); if (!iter) { return NULL; } if (!entry->key) { /* Iterator is invalid, so just return the next valid entry */ return hashmap_iter_next(map, iter); } hashmap_entry_remove(map, entry); return (struct hashmap_iter *)hashmap_entry_get_populated(map, entry); } /* * Return the key of the entry pointed to by the iterator. */ const void *hashmap_iter_get_key(const struct hashmap_iter *iter) { if (!iter) { return NULL; } return (const void *)((struct hashmap_entry *)iter)->key; } /* * Return the data of the entry pointed to by the iterator. */ void *hashmap_iter_get_data(const struct hashmap_iter *iter) { if (!iter) { return NULL; } return ((struct hashmap_entry *)iter)->data; } /* * Set the data pointer of the entry pointed to by the iterator. */ void hashmap_iter_set_data(const struct hashmap_iter *iter, void *data) { if (!iter) { return; } ((struct hashmap_entry *)iter)->data = data; } /* * Invoke func for each entry in the hashmap. Unlike the hashmap_iter_*() * interface, this function supports calls to hashmap_remove() during iteration. * However, it is an error to put or remove an entry other than the current one, * and doing so will immediately halt iteration and return an error. * Iteration is stopped if func returns non-zero. Returns func's return * value if it is < 0, otherwise, 0. 
*/ int hashmap_foreach(const struct hashmap *map, int (*func)(const void *, void *, void *), void *arg) { struct hashmap_entry *entry; size_t num_entries; const void *key; int rc; HASHMAP_ASSERT(map != NULL); HASHMAP_ASSERT(func != NULL); entry = map->table; for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } num_entries = map->num_entries; key = entry->key; rc = func(entry->key, entry->data, arg); if (rc < 0) { return rc; } if (rc > 0) { return 0; } /* Run this entry again if func() deleted it */ if (entry->key != key) { --entry; } else if (num_entries != map->num_entries) { /* Stop immediately if func put/removed another entry */ return -1; } } return 0; } /* * Default hash function for string keys. * This is an implementation of the well-documented Jenkins one-at-a-time * hash function. */ size_t hashmap_hash_string(const void *key) { const char *key_str = (const char *)key; size_t hash = 0; for (; *key_str; ++key_str) { hash += *key_str; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); return hash; } /* * Default key comparator function for string keys. */ int hashmap_compare_string(const void *a, const void *b) { return strcmp((const char *)a, (const char *)b); } /* * Default key allocation function for string keys. Use free() for the * key_free_func. */ void *hashmap_alloc_key_string(const void *key) { return (void *)strdup((const char *)key); } #ifdef HASHMAP_METRICS /* * Return the load factor. */ double hashmap_load_factor(const struct hashmap *map) { HASHMAP_ASSERT(map != NULL); if (!map->table_size) { return 0; } return (double)map->num_entries / map->table_size; } /* * Return the average number of collisions per entry. 
*/ double hashmap_collisions_mean(const struct hashmap *map) { struct hashmap_entry *entry; size_t total_collisions = 0; HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return 0; } for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } total_collisions += entry->num_collisions; } return (double)total_collisions / map->num_entries; } /* * Return the variance between entry collisions. The higher the variance, * the more likely the hash function is poor and is resulting in clustering. */ double hashmap_collisions_variance(const struct hashmap *map) { struct hashmap_entry *entry; double mean_collisions; double variance; double total_variance = 0; HASHMAP_ASSERT(map != NULL); if (!map->num_entries) { return 0; } mean_collisions = hashmap_collisions_mean(map); for (entry = map->table; entry < &map->table[map->table_size]; ++entry) { if (!entry->key) { continue; } variance = (double)entry->num_collisions - mean_collisions; total_variance += variance * variance; } return total_variance / map->num_entries; } #endif prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/file_reading.h0000644000004100000410000000101213606417577025071 0ustar www-datawww-data#ifndef FILE_READING_H #define FILE_READING_H #include typedef struct { FILE *file; size_t length; char *path; // Information processed from file path ID multiprocess_mode; ID type; VALUE pid; } file_t; typedef struct { char *buffer; size_t size; size_t capacity; } buffer_t; int file_close(file_t *file); int file_open_from_params(file_t *file, VALUE params); int read_from_file(const file_t *source, buffer_t *data); void buffer_dispose(buffer_t *buffer); #endif prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/file_format.h0000644000004100000410000000027413606417577024761 0ustar www-datawww-data#ifndef FILE_FORMAT_H #define FILE_FORMAT_H #include #define START_POSITION 8 #define INITIAL_SIZE (2 * sizeof(int32_t)) uint32_t padding_length(uint32_t key_length); 
#endifprometheus-client-mmap-0.10.0/ext/fast_mmaped_file/mmap.h0000644000004100000410000000310313606417577023416 0ustar www-datawww-data#ifndef MMAP_H #define MMAP_H #include #include #define MM_MODIFY 1 #define MM_ORIGIN 2 #define MM_CHANGE (MM_MODIFY | 4) #define MM_PROTECT 8 #define MM_FROZEN (1 << 0) #define MM_FIXED (1 << 1) #define MM_ANON (1 << 2) #define MM_LOCK (1 << 3) #define MM_IPC (1 << 4) #define MM_TMP (1 << 5) #ifndef MMAP_RETTYPE #define MMAP_RETTYPE void * #endif typedef struct { MMAP_RETTYPE addr; int smode, pmode, vscope; int advice, flag; VALUE key; size_t len, real; off_t offset; int fd; char *path; } mm_mmap; typedef struct { int count; mm_mmap *t; } mm_ipc; #define GET_MMAP(obj, i_mm, t_modify) \ Data_Get_Struct(obj, mm_ipc, i_mm); \ if (!i_mm->t->path || i_mm->t->fd < 0 || i_mm->t->addr == NULL || i_mm->t->addr == MAP_FAILED) { \ rb_raise(rb_eIOError, "unmapped file"); \ } \ if ((t_modify & MM_MODIFY) && (i_mm->t->flag & MM_FROZEN)) { \ rb_error_frozen("mmap"); \ } VALUE mm_s_alloc(VALUE obj); VALUE mm_s_new(int argc, VALUE *argv, VALUE obj); VALUE mm_init(VALUE obj, VALUE fname); VALUE mm_aref_m(int argc, VALUE *argv, VALUE obj); VALUE mm_msync(int argc, VALUE *argv, VALUE obj); VALUE mm_unmap(VALUE obj); #endif prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/extconf.rb0000644000004100000410000000141713606417577024314 0ustar www-datawww-datarequire 'mkmf' require 'fileutils' $CFLAGS << ' -std=c99 -D_POSIX_C_SOURCE=200809L -Wall -Wextra' if enable_config('fail-on-warning') $CFLAGS << ' -Werrno' end if enable_config('debug') $CFLAGS << ' -O0 -g' end if enable_config('address-sanitizer') $CFLAGS << ' -O -fsanitize=address -fno-omit-frame-pointer -g' end CONFIG['warnflags'].slice!(/ -Wdeclaration-after-statement/) cwd = File.expand_path(File.dirname(__FILE__)) vendor_dir = File.join(cwd, '../../vendor/c') src_dir = File.join(cwd, '../../ext/fast_mmaped_file') src_files = %W[#{vendor_dir}/jsmn/jsmn.c #{vendor_dir}/hashmap/src/hashmap.c] 
FileUtils.cp(src_files, src_dir) $INCFLAGS << " -I#{vendor_dir}/jsmn -I#{vendor_dir}/hashmap/src" dir_config('fast_mmaped_file') create_makefile('fast_mmaped_file') prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/file_format.c0000644000004100000410000000024313606417577024750 0ustar www-datawww-data#include "file_format.h" inline uint32_t padding_length(uint32_t key_length) { return 8 - (sizeof(uint32_t) + key_length) % 8; // padding | 8 byte aligned } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/rendering.c0000644000004100000410000001276713606417577024454 0ustar www-datawww-data#include #include #include "file_parsing.h" #include "globals.h" #include "rendering.h" #include "utils.h" #ifndef DBL_DECIMAL_DIG #define DBL_DECIMAL_DIG 17 #endif #define LABELS_START_OFFSET 4 static inline int is_valid(const jsmntok_t *token) { return token->start < token->end && token->start >= 0; } static inline int valid_not_null(const entry_t *entry, const jsmntok_t *token) { static const char null_s[] = "null"; if (!is_valid(token)) { return 0; } if (token->type != JSMN_PRIMITIVE) { return 1; } size_t token_len = token->end - token->start; if (token_len < sizeof(null_s) - 1) { return 1; } return strncmp(null_s, entry->json + token->start, sizeof(null_s) - 1) != 0; } static inline int append_token(VALUE string, const entry_t *entry, const jsmntok_t *token) { if (!is_valid(token)) { save_exception(prom_eParsingError, "parsing failed: %s", entry->json); return 0; } rb_str_cat(string, entry->json + token->start, token->end - token->start); return 1; } static int append_labels(VALUE string, const entry_t *entry, const int label_count, const jsmntok_t *tokens) { if (label_count <= 0) { if (is_pid_significant(entry)) { rb_str_cat(string, "{pid=\"", 6); rb_str_append(string, entry->pid); rb_str_cat(string, "\"}", 2); } return 1; } rb_str_cat(string, "{", 1); for (int i = 0; i < label_count; i++) { int key = LABELS_START_OFFSET + i; int val = LABELS_START_OFFSET + label_count + 1 + 
i; if (!append_token(string, entry, &tokens[key])) { return 0; } rb_str_cat(string, "=", 1); rb_str_cat(string, "\"", 1); if (valid_not_null(entry, &tokens[val])) { append_token(string, entry, &tokens[val]); } rb_str_cat(string, "\"", 1); if (i < label_count - 1) { rb_str_cat(string, ",", 1); } } if (is_pid_significant(entry)) { rb_str_cat(string, ",pid=\"", 6); rb_str_append(string, entry->pid); rb_str_cat(string, "\"", 1); } rb_str_cat(string, "}", 1); return 1; } static int validate_token_count(const int token_count, const entry_t *entry) { if (token_count < 0) { save_exception(prom_eParsingError, "too many labels or malformed json: %s", entry->json); return 0; } if (token_count < LABELS_START_OFFSET) { save_exception(prom_eParsingError, "malformed json: %s", entry->json); return 0; } if ((token_count - (LABELS_START_OFFSET + 1)) % 2 != 0) { save_exception(prom_eParsingError, "mismatched number of labels: %s", entry->json); return 0; } return 1; } static int append_entry(VALUE string, const entry_t *entry) { jsmn_parser parser; jsmn_init(&parser); jsmntok_t tokens[200]; jsmntok_t *name_token = &tokens[2]; int token_count = jsmn_parse(&parser, entry->json, entry->json_size, tokens, sizeof(tokens) / sizeof(tokens[0])); int label_count = (token_count - (LABELS_START_OFFSET + 1)) / 2; // // Example JSON "['metric', 'name',['label_a','label_b'],['value_a', 'value_b']]" // will be parsed into following token list: // // [ "'metric', 'name',['label_a','label_b'],['value_a', 'value_b']", // "metric", "name", // "['label_a','label_b']", "label_a", "label_b", // "['value_a', 'value_b']", "value_a", "value_b" ] // // where 'metric' is the name of the metric, while 'name' // is in summaries and histograms to store names of "submetrics" like: // histogram_name_bucket or histogram_name_sum if (!validate_token_count(token_count, entry)) { return 0; } if (!append_token(string, entry, name_token)) { return 0; } if (!append_labels(string, entry, label_count, tokens)) { return 0; 
} char value[255]; // print value with highest possible precision so that we do not lose any data int written = snprintf(value, sizeof(value), " %.*g\n", DBL_DECIMAL_DIG, entry->value); rb_str_cat(string, value, written); return 1; } static void append_entry_head(VALUE string, const entry_t *entry) { static const char help_beg[] = "# HELP "; static const char help_fin[] = " Multiprocess metric\n"; rb_str_cat(string, help_beg, sizeof(help_beg) - 1); rb_str_cat(string, entry->name, entry->name_len); rb_str_cat(string, help_fin, sizeof(help_fin) - 1); static const char type_beg[] = "# TYPE "; rb_str_cat(string, type_beg, sizeof(type_beg) - 1); rb_str_cat(string, entry->name, entry->name_len); rb_str_cat(string, " ", 1); rb_str_cat2(string, rb_id2name(entry->type)); rb_str_cat(string, "\n", 1); } static inline int entry_name_equal(const entry_t *a, const entry_t *b) { if (a == NULL || b == NULL) { return a == b; } if (a->name_len != b->name_len) { return 0; } return strncmp(a->name, b->name, a->name_len) == 0; } int entries_to_string(VALUE string, entry_t **sorted_entries, size_t entries_count) { entry_t *previous = NULL; for (size_t i = 0; i < entries_count; i++) { entry_t *entry = sorted_entries[i]; // when entry->name changes write metric header if (!entry_name_equal(previous, entry)) { previous = entry; append_entry_head(string, entry); } if (!append_entry(string, entry)) { return 0; } } return 1; } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/jsmn.c0000644000004100000410000001725313606417577023441 0ustar www-datawww-data#include "jsmn.h" /** * Allocates a fresh unused token from the token pull. */ static jsmntok_t *jsmn_alloc_token(jsmn_parser *parser, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *tok; if (parser->toknext >= num_tokens) { return NULL; } tok = &tokens[parser->toknext++]; tok->start = tok->end = -1; tok->size = 0; #ifdef JSMN_PARENT_LINKS tok->parent = -1; #endif return tok; } /** * Fills token type and boundaries. 
*/ static void jsmn_fill_token(jsmntok_t *token, jsmntype_t type, int start, int end) { token->type = type; token->start = start; token->end = end; token->size = 0; } /** * Fills next available token with JSON primitive. */ static int jsmn_parse_primitive(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *token; int start; start = parser->pos; for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { switch (js[parser->pos]) { #ifndef JSMN_STRICT /* In strict mode primitive must be followed by "," or "}" or "]" */ case ':': #endif case '\t' : case '\r' : case '\n' : case ' ' : case ',' : case ']' : case '}' : goto found; } if (js[parser->pos] < 32 || js[parser->pos] >= 127) { parser->pos = start; return JSMN_ERROR_INVAL; } } #ifdef JSMN_STRICT /* In strict mode primitive must be followed by a comma/object/array */ parser->pos = start; return JSMN_ERROR_PART; #endif found: if (tokens == NULL) { parser->pos--; return 0; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) { parser->pos = start; return JSMN_ERROR_NOMEM; } jsmn_fill_token(token, JSMN_PRIMITIVE, start, parser->pos); #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif parser->pos--; return 0; } /** * Fills next token with JSON string. 
*/ static int jsmn_parse_string(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, size_t num_tokens) { jsmntok_t *token; int start = parser->pos; parser->pos++; /* Skip starting quote */ for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { char c = js[parser->pos]; /* Quote: end of string */ if (c == '\"') { if (tokens == NULL) { return 0; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) { parser->pos = start; return JSMN_ERROR_NOMEM; } jsmn_fill_token(token, JSMN_STRING, start+1, parser->pos); #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif return 0; } /* Backslash: Quoted symbol expected */ if (c == '\\' && parser->pos + 1 < len) { int i; parser->pos++; switch (js[parser->pos]) { /* Allowed escaped symbols */ case '\"': case '/' : case '\\' : case 'b' : case 'f' : case 'r' : case 'n' : case 't' : break; /* Allows escaped symbol \uXXXX */ case 'u': parser->pos++; for(i = 0; i < 4 && parser->pos < len && js[parser->pos] != '\0'; i++) { /* If it isn't a hex character we have an error */ if(!((js[parser->pos] >= 48 && js[parser->pos] <= 57) || /* 0-9 */ (js[parser->pos] >= 65 && js[parser->pos] <= 70) || /* A-F */ (js[parser->pos] >= 97 && js[parser->pos] <= 102))) { /* a-f */ parser->pos = start; return JSMN_ERROR_INVAL; } parser->pos++; } parser->pos--; break; /* Unexpected symbol */ default: parser->pos = start; return JSMN_ERROR_INVAL; } } } parser->pos = start; return JSMN_ERROR_PART; } /** * Parse JSON string and fill tokens. 
*/ int jsmn_parse(jsmn_parser *parser, const char *js, size_t len, jsmntok_t *tokens, unsigned int num_tokens) { int r; int i; jsmntok_t *token; int count = parser->toknext; for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) { char c; jsmntype_t type; c = js[parser->pos]; switch (c) { case '{': case '[': count++; if (tokens == NULL) { break; } token = jsmn_alloc_token(parser, tokens, num_tokens); if (token == NULL) return JSMN_ERROR_NOMEM; if (parser->toksuper != -1) { tokens[parser->toksuper].size++; #ifdef JSMN_PARENT_LINKS token->parent = parser->toksuper; #endif } token->type = (c == '{' ? JSMN_OBJECT : JSMN_ARRAY); token->start = parser->pos; parser->toksuper = parser->toknext - 1; break; case '}': case ']': if (tokens == NULL) break; type = (c == '}' ? JSMN_OBJECT : JSMN_ARRAY); #ifdef JSMN_PARENT_LINKS if (parser->toknext < 1) { return JSMN_ERROR_INVAL; } token = &tokens[parser->toknext - 1]; for (;;) { if (token->start != -1 && token->end == -1) { if (token->type != type) { return JSMN_ERROR_INVAL; } token->end = parser->pos + 1; parser->toksuper = token->parent; break; } if (token->parent == -1) { if(token->type != type || parser->toksuper == -1) { return JSMN_ERROR_INVAL; } break; } token = &tokens[token->parent]; } #else for (i = parser->toknext - 1; i >= 0; i--) { token = &tokens[i]; if (token->start != -1 && token->end == -1) { if (token->type != type) { return JSMN_ERROR_INVAL; } parser->toksuper = -1; token->end = parser->pos + 1; break; } } /* Error if unmatched closing bracket */ if (i == -1) return JSMN_ERROR_INVAL; for (; i >= 0; i--) { token = &tokens[i]; if (token->start != -1 && token->end == -1) { parser->toksuper = i; break; } } #endif break; case '\"': r = jsmn_parse_string(parser, js, len, tokens, num_tokens); if (r < 0) return r; count++; if (parser->toksuper != -1 && tokens != NULL) tokens[parser->toksuper].size++; break; case '\t' : case '\r' : case '\n' : case ' ': break; case ':': parser->toksuper = parser->toknext - 
1; break; case ',': if (tokens != NULL && parser->toksuper != -1 && tokens[parser->toksuper].type != JSMN_ARRAY && tokens[parser->toksuper].type != JSMN_OBJECT) { #ifdef JSMN_PARENT_LINKS parser->toksuper = tokens[parser->toksuper].parent; #else for (i = parser->toknext - 1; i >= 0; i--) { if (tokens[i].type == JSMN_ARRAY || tokens[i].type == JSMN_OBJECT) { if (tokens[i].start != -1 && tokens[i].end == -1) { parser->toksuper = i; break; } } } #endif } break; #ifdef JSMN_STRICT /* In strict mode primitives are: numbers and booleans */ case '-': case '0': case '1' : case '2': case '3' : case '4': case '5': case '6': case '7' : case '8': case '9': case 't': case 'f': case 'n' : /* And they must not be keys of the object */ if (tokens != NULL && parser->toksuper != -1) { jsmntok_t *t = &tokens[parser->toksuper]; if (t->type == JSMN_OBJECT || (t->type == JSMN_STRING && t->size != 0)) { return JSMN_ERROR_INVAL; } } #else /* In non-strict mode every unquoted value is a primitive */ default: #endif r = jsmn_parse_primitive(parser, js, len, tokens, num_tokens); if (r < 0) return r; count++; if (parser->toksuper != -1 && tokens != NULL) tokens[parser->toksuper].size++; break; #ifdef JSMN_STRICT /* Unexpected char in strict mode */ default: return JSMN_ERROR_INVAL; #endif } } if (tokens != NULL) { for (i = parser->toknext - 1; i >= 0; i--) { /* Unmatched opened object or array */ if (tokens[i].start != -1 && tokens[i].end == -1) { return JSMN_ERROR_PART; } } } return count; } /** * Creates a new parser based over a given buffer with an array of tokens * available. 
*/ void jsmn_init(jsmn_parser *parser) { parser->pos = 0; parser->toknext = 0; parser->toksuper = -1; } prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/file_parsing.c0000644000004100000410000001351413606417577025130 0ustar www-datawww-data#include #include #include #include "file_format.h" #include "file_parsing.h" #include "globals.h" #include "utils.h" HASHMAP_FUNCS_CREATE(entry, const entry_t, entry_t) typedef int (*compare_fn)(const void *a, const void *b); static size_t hashmap_hash_entry(const entry_t *entry) { return hashmap_hash_string(entry->json); } static int hashmap_compare_entry(const entry_t *a, const entry_t *b) { if (a->json_size != b->json_size) { return -1; } if (is_pid_significant(a) && (rb_str_equal(a->pid, b->pid) == Qfalse)) { return -1; } return strncmp(a->json, b->json, a->json_size); } static void entry_free(entry_t *entry) { free(entry->json); free(entry); } static inline double min(double a, double b) { return a < b ? a : b; } static inline double max(double a, double b) { return a > b ? 
a : b; }
/* Folds a newly parsed entry's value into the entry already stored in the
 * map.  Gauges honor the metric's multiprocess mode (min / max / livesum /
 * last-write-wins); every other metric type is summed across processes. */
static void merge_entry(entry_t *found, const entry_t *entry) {
    if (entry->type == sym_gauge) {
        if (entry->multiprocess_mode == sym_min) {
            found->value = min(found->value, entry->value);
        } else if (entry->multiprocess_mode == sym_max) {
            found->value = max(found->value, entry->value);
        } else if (entry->multiprocess_mode == sym_livesum) {
            found->value += entry->value;
        } else {
            found->value = entry->value;
        }
    } else {
        found->value += entry->value;
    }
}
/* Inserts entry into the map, or -- when an equal key already exists --
 * merges the value into the stored entry and frees the duplicate.
 * Ownership of entry passes to this function either way. */
void merge_or_store(struct hashmap *map, entry_t *entry) {
    entry_t *found = entry_hashmap_get(map, entry);
    if (found) {
        merge_entry(found, entry);
        entry_free(entry);
    } else {
        entry_hashmap_put(map, entry, entry);  // use the hashmap like hashset actually
    }
}
/* Builds a heap-allocated entry from the mmap'ed file image: copies
 * encoded_len bytes of JSON starting at pos (NUL-terminating the copy),
 * records the per-file metadata (pid, type, multiprocess mode), and reads
 * the double value stored after the JSON plus its alignment padding.
 * Returns NULL if either allocation fails. */
entry_t *entry_new(buffer_t *source, uint32_t pos, uint32_t encoded_len, file_t *file_info) {
    entry_t *entry = calloc(1, sizeof(entry_t));
    if (entry == NULL) {
        return NULL;
    }
    entry->json = malloc(encoded_len + 1);
    if (entry->json == NULL) {
        free(entry);
        return NULL;
    }
    memcpy(entry->json, source->buffer + pos, encoded_len);
    entry->json[encoded_len] = '\0';
    entry->json_size = encoded_len;
    entry->pid = file_info->pid;
    entry->multiprocess_mode = file_info->multiprocess_mode;
    entry->type = file_info->type;
    char *value_ptr = source->buffer + pos + encoded_len + padding_length(encoded_len);
    memcpy(&(entry->value), value_ptr, sizeof(double));
    return entry;
}
/* Locates the metric name inside the entry's JSON by parsing only the first
 * two jsmn tokens (the enclosing container and its first element).  On
 * success, name/name_len point into entry->json and 1 is returned; returns 0
 * when no usable name token was produced. */
static int add_parsed_name(entry_t *entry) {
    jsmn_parser parser;
    jsmn_init(&parser);
    jsmntok_t tokens[2];
    memset(&tokens, 0, sizeof(tokens));
    jsmn_parse(&parser, entry->json, entry->json_size, tokens, 2);
    jsmntok_t *name_token = &tokens[1];
    if (name_token->start < name_token->end && name_token->start > 0) {
        entry->name = entry->json + name_token->start;
        entry->name_len = name_token->end - name_token->start;
        return 1;
    }
    return 0;
}
/* Orders entries lexicographically by their JSON keys (used with qsort;
 * compares up to the shorter of the two key lengths). */
static int entry_lexical_comparator(const entry_t **a, const entry_t **b) {
    size_t size_a = (*a)->json_size;
    size_t size_b = (*b)->json_size;
    size_t min_length = size_a < size_b ?
size_a : size_b;
    return strncmp((*a)->json, (*b)->json, min_length);
}
/* Prepares a hashmap for entry_t keys: installs the JSON-based hash and
 * equality callbacks with an initial capacity of 1000, and registers
 * entry_free so the map releases the keys it owns. */
void hashmap_setup(struct hashmap *map) {
    hashmap_init(map, (size_t(*)(const void *))hashmap_hash_entry, (int (*)(const void *, const void *))hashmap_compare_entry, 1000);
    hashmap_set_key_alloc_funcs(map, NULL, (void (*)(void *))entry_free);
}
/* Parses one metrics file image into the map.  The first 4 bytes record how
 * many bytes of the file are in use; each record after START_POSITION is
 * [u32 JSON length][JSON key + padding][double value].  A used count larger
 * than the file, or a record that would run past it, saves a parsing
 * exception and returns 0; returns 1 on success (including a file too short
 * to contain any data). */
int process_buffer(file_t *file_info, buffer_t *source, struct hashmap *map) {
    if (source->size < START_POSITION) {
        // nothing to read
        return 1;
    }
    uint32_t used;
    memcpy(&used, source->buffer, sizeof(uint32_t));
    if (used > source->size) {
        save_exception(prom_eParsingError, "source file %s corrupted, used %u > file size %u", file_info->path, used, source->size);
        return 0;
    }
    uint32_t pos = START_POSITION;
    while (pos + sizeof(uint32_t) < used) {
        uint32_t encoded_len;
        memcpy(&encoded_len, source->buffer + pos, sizeof(uint32_t));
        pos += sizeof(uint32_t);
        uint32_t value_offset = encoded_len + padding_length(encoded_len);
        if (pos + value_offset + sizeof(double) > used) {
            save_exception(prom_eParsingError, "source file %s corrupted, used %u < stored data length %u", file_info->path, used, pos + value_offset + sizeof(double));
            return 0;
        }
        entry_t *entry = entry_new(source, pos, encoded_len, file_info);
        if (entry == NULL) {
            save_exception(rb_eNoMemError, "Failed creating metrics entry");
            return 0;
        }
        merge_or_store(map, entry);
        pos += value_offset + sizeof(double);
    }
    return 1;
}
/* Collects the map's entries into a newly allocated, lexicographically
 * sorted array handed back through *sorted_entries (owned by the caller).
 * Entries whose metric name cannot be parsed are dropped, which triggers
 * the count-mismatch failure below.  Returns 1 on success, 0 on error
 * (an exception is saved and the array freed). */
int sort_map_entries(const struct hashmap *map, entry_t ***sorted_entries) {
    size_t num = hashmap_size(map);
    entry_t **list = calloc(num, sizeof(entry_t *));
    if (list == NULL) {
        save_exception(rb_eNoMemError, "Couldn't allocate for %zu memory", num * sizeof(entry_t *));
        return 0;
    }
    size_t cnt = 0;
    struct hashmap_iter *iter;
    for (iter = hashmap_iter(map); iter; iter = hashmap_iter_next(map, iter)) {
        entry_t *entry = (entry_t *)entry_hashmap_iter_get_key(iter);
        if (add_parsed_name(entry)) {
            list[cnt] = entry;
            cnt++;
        }
    }
    if (cnt != num) {
        save_exception(rb_eRuntimeError, "Processed entries %zu != 
map entries %zu", cnt, num);
        free(list);
        return 0;
    }
    qsort(list, cnt, sizeof(entry_t *), (compare_fn)&entry_lexical_comparator);
    *sorted_entries = list;
    return 1;
}
/* A pid only distinguishes entries for gauges that are NOT aggregated
 * across processes (i.e. multiprocess mode is none of min/max/livesum):
 * for those, samples from different processes must stay separate. */
int is_pid_significant(const entry_t *e) {
    ID mp = e->multiprocess_mode;
    return e->type == sym_gauge && !(mp == sym_min || mp == sym_max || mp == sym_livesum);
}
prometheus-client-mmap-0.10.0/ext/fast_mmaped_file/file_parsing.h0000644000004100000410000000100613606417577025126 0ustar www-datawww-data#ifndef FILE_PARSING_H #define FILE_PARSING_H #include #include
/* NOTE(review): the system <...> header names in the include run above appear
 * to have been stripped by the archive extraction -- confirm against the
 * original file_parsing.h. */
/* One parsed metric sample read from an mmap'ed file. */
typedef struct {
    char *json;           /* owned, NUL-terminated JSON key (freed by entry_free) */
    size_t json_size;     /* length of json, excluding the terminating NUL */
    char *name;           /* points into json; filled in by add_parsed_name() */
    size_t name_len;
    ID multiprocess_mode; /* Ruby symbol ID (compared against sym_min/max/livesum) */
    ID type;              /* Ruby symbol ID (compared against sym_gauge) */
    VALUE pid;            /* Ruby string; compared with rb_str_equal() */
    double value;
} entry_t;
void hashmap_setup(struct hashmap *map);
int process_buffer(file_t *file_info, buffer_t *source, struct hashmap *map);
int sort_map_entries(const struct hashmap *map, entry_t ***sorted_entries);
int is_pid_significant(const entry_t *e);
#endif