thread_safe-0.3.5/ 0000755 0000041 0000041 00000000000 12530443654 014022 5 ustar www-data www-data thread_safe-0.3.5/yard-template/ 0000755 0000041 0000041 00000000000 12530443654 016572 5 ustar www-data www-data thread_safe-0.3.5/yard-template/default/ 0000755 0000041 0000041 00000000000 12530443654 020216 5 ustar www-data www-data thread_safe-0.3.5/yard-template/default/layout/ 0000755 0000041 0000041 00000000000 12530443654 021533 5 ustar www-data www-data thread_safe-0.3.5/yard-template/default/layout/html/ 0000755 0000041 0000041 00000000000 12530443654 022477 5 ustar www-data www-data thread_safe-0.3.5/yard-template/default/layout/html/footer.erb 0000644 0000041 0000041 00000001200 12530443654 024460 0 ustar www-data www-data
thread_safe-0.3.5/yard-template/default/fulldoc/ 0000755 0000041 0000041 00000000000 12530443654 021646 5 ustar www-data www-data thread_safe-0.3.5/yard-template/default/fulldoc/html/ 0000755 0000041 0000041 00000000000 12530443654 022612 5 ustar www-data www-data thread_safe-0.3.5/yard-template/default/fulldoc/html/css/ 0000755 0000041 0000041 00000000000 12530443654 023402 5 ustar www-data www-data thread_safe-0.3.5/yard-template/default/fulldoc/html/css/common.css 0000644 0000041 0000041 00000004146 12530443654 025411 0 ustar www-data www-data /* Override this file with custom rules */ body { line-height: 18px; } .docstring code, .docstring .object_link a, #filecontents code { padding: 0px 3px 1px 3px; border: 1px solid #eef; background: #f5f5ff; } #filecontents pre code, .docstring pre code { border: none; background: none; padding: 0; } #filecontents pre.code, .docstring pre.code, .tags pre.example, .docstring code, .docstring .object_link a, #filecontents code { -moz-border-radius: 2px; -webkit-border-radius: 2px; } /* syntax highlighting */ .source_code { display: none; padding: 3px 8px; border-left: 8px solid #ddd; margin-top: 5px; } #filecontents pre.code, .docstring pre.code, .source_code pre { font-family: monospace; } #filecontents pre.code, .docstring pre.code { display: block; } .source_code .lines { padding-right: 12px; color: #555; text-align: right; } #filecontents pre.code, .docstring pre.code, .tags pre.example { padding: 5px 12px; margin-top: 4px; border: 1px solid #eef; background: #f5f5ff; } pre.code { color: #000; } pre.code .info.file { color: #555; } pre.code .val { color: #036A07; } pre.code .tstring_content, pre.code .heredoc_beg, pre.code .heredoc_end, pre.code .qwords_beg, pre.code .qwords_end, pre.code .tstring, pre.code .dstring { color: #036A07; } pre.code .fid, pre.code .rubyid_new, pre.code .rubyid_to_s, pre.code .rubyid_to_sym, pre.code .rubyid_to_f, pre.code .rubyid_to_i, pre.code .rubyid_each { color: inherit; } pre.code 
.comment { color: #777; font-style: italic; } pre.code .const, pre.code .constant { color: inherit; font-weight: bold; font-style: italic; } pre.code .label, pre.code .symbol { color: #C5060B; } pre.code .kw, pre.code .rubyid_require, pre.code .rubyid_extend, pre.code .rubyid_include, pre.code .int { color: #0000FF; } pre.code .ivar { color: #660E7A; } pre.code .gvar, pre.code .rubyid_backref, pre.code .rubyid_nth_ref { color: #6D79DE; } pre.code .regexp, .dregexp { color: #036A07; } pre.code a { border-bottom: 1px dotted #bbf; } thread_safe-0.3.5/Rakefile 0000644 0000041 0000041 00000003076 12530443654 015475 0 ustar www-data www-data require "bundler/gem_tasks" require "rake/testtask" ## safely load all the rake tasks in the `tasks` directory def safe_load(file) begin load file rescue LoadError => ex puts "Error loading rake tasks from '#{file}' but will continue..." puts ex.message end end Dir.glob('tasks/**/*.rake').each do |rakefile| safe_load rakefile end task :default => :test if defined?(JRUBY_VERSION) require "ant" directory "pkg/classes" directory 'pkg/tests' desc "Clean up build artifacts" task :clean do rm_rf "pkg/classes" rm_rf "pkg/tests" rm_rf "lib/thread_safe/jruby_cache_backend.jar" end desc "Compile the extension" task :compile => "pkg/classes" do |t| ant.javac :srcdir => "ext", :destdir => t.prerequisites.first, :source => "1.5", :target => "1.5", :debug => true, :classpath => "${java.class.path}:${sun.boot.class.path}" end desc "Build the jar" task :jar => :compile do ant.jar :basedir => "pkg/classes", :destfile => "lib/thread_safe/jruby_cache_backend.jar", :includes => "**/*.class" end desc "Build test jar" task 'test-jar' => 'pkg/tests' do |t| ant.javac :srcdir => 'test/src', :destdir => t.prerequisites.first, :source => "1.5", :target => "1.5", :debug => true ant.jar :basedir => 'pkg/tests', :destfile => 'test/package.jar', :includes => '**/*.class' end task :package => [ :jar, 'test-jar' ] else # No need to package anything for non-jruby 
rubies task :package end Rake::TestTask.new :test => :package do |t| t.libs << "lib" t.test_files = FileList["test/**/*.rb"] end thread_safe-0.3.5/Gemfile 0000644 0000041 0000041 00000001016 12530443654 015313 0 ustar www-data www-data source 'https://rubygems.org' gemspec group :development, :test do gem 'minitest', '~> 5.5.1' gem 'minitest-reporters', '~> 1.0.11' gem 'simplecov', '~> 0.9.2', :require => false gem 'coveralls', '~> 0.7.11', :require => false end group :documentation do gem 'countloc', '~> 0.4.0', :platforms => :mri, :require => false gem 'yard', '~> 0.8.7.6', :require => false gem 'inch', '~> 0.5.10', :platforms => :mri, :require => false gem 'redcarpet', '~> 3.2.2', platforms: :mri # understands github markdown end thread_safe-0.3.5/examples/ 0000755 0000041 0000041 00000000000 12530443654 015640 5 ustar www-data www-data thread_safe-0.3.5/examples/bench_cache.rb 0000755 0000041 0000041 00000001131 12530443654 020366 0 ustar www-data www-data #!/usr/bin/env ruby -wKU require "benchmark" require "thread_safe" hash = {} cache = ThreadSafe::Cache.new ENTRIES = 10_000 ENTRIES.times do |i| hash[i] = i cache[i] = i end TESTS = 40_000_000 Benchmark.bmbm do |results| key = rand(10_000) results.report('Hash#[]') do TESTS.times { hash[key] } end results.report('Cache#[]') do TESTS.times { cache[key] } end results.report('Hash#each_pair') do (TESTS / ENTRIES).times { hash.each_pair {|k,v| v} } end results.report('Cache#each_pair') do (TESTS / ENTRIES).times { cache.each_pair {|k,v| v} } end end thread_safe-0.3.5/thread_safe.gemspec 0000644 0000041 0000041 00000002427 12530443654 017641 0 ustar www-data www-data # -*- encoding: utf-8 -*- $:.push File.expand_path('../lib', __FILE__) unless $:.include?('lib') require 'thread_safe/version' Gem::Specification.new do |gem| gem.authors = ["Charles Oliver Nutter", "thedarkone"] gem.email = ["headius@headius.com", "thedarkone2@gmail.com"] gem.description = %q{Thread-safe collections and utilities for Ruby} 
gem.summary = %q{A collection of data structures and utilities to make thread-safe programming in Ruby easier} gem.homepage = "https://github.com/ruby-concurrency/thread_safe" gem.files = `git ls-files`.split($\) gem.files += ['lib/thread_safe/jruby_cache_backend.jar'] if defined?(JRUBY_VERSION) gem.files -= ['.gitignore'] # see https://github.com/headius/thread_safe/issues/40#issuecomment-42315441 gem.platform = 'java' if defined?(JRUBY_VERSION) gem.executables = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) } gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) gem.name = "thread_safe" gem.require_paths = ["lib"] gem.version = ThreadSafe::VERSION gem.license = "Apache-2.0" gem.add_development_dependency 'atomic', '= 1.1.16' gem.add_development_dependency 'rake' gem.add_development_dependency 'minitest', '>= 4' end thread_safe-0.3.5/.travis.yml 0000644 0000041 0000041 00000001326 12530443654 016135 0 ustar www-data www-data language: ruby rvm: - 2.2.0 - 2.1.5 - 2.1.4 - 2.0.0 - 1.9.3 - ruby-head - jruby-1.7.18 - jruby-head - rbx-2 jdk: # for JRuby only - openjdk7 - oraclejdk8 matrix: exclude: - rvm: 2.2.0 jdk: openjdk7 jdk: oraclejdk8 - rvm: 2.1.5 jdk: openjdk7 jdk: oraclejdk8 - rvm: 2.1.4 jdk: openjdk7 jdk: oraclejdk8 - rvm: 2.0.0 jdk: openjdk7 jdk: oraclejdk8 - rvm: 1.9.3 jdk: openjdk7 jdk: oraclejdk8 - rvm: ruby-head jdk: openjdk7 jdk: oraclejdk8 - rvm: rbx-2 jdk: openjdk7 jdk: oraclejdk8 allow_failures: - rvm: ruby-head - rvm: jruby-head - rvm: 1.9.3 script: "rake TESTOPTS='--seed=1'" thread_safe-0.3.5/lib/ 0000755 0000041 0000041 00000000000 12530443654 014570 5 ustar www-data www-data thread_safe-0.3.5/lib/thread_safe.rb 0000644 0000041 0000041 00000003610 12530443654 017362 0 ustar www-data www-data require 'thread_safe/version' require 'thread_safe/synchronized_delegator' module ThreadSafe autoload :Cache, 'thread_safe/cache' autoload :Util, 'thread_safe/util' # Various classes within allows for +nil+ values to be stored, so a special +NULL+ 
token is required to indicate the "nil-ness". NULL = Object.new if defined?(JRUBY_VERSION) require 'jruby/synchronized' # A thread-safe subclass of Array. This version locks # against the object itself for every method call, # ensuring only one thread can be reading or writing # at a time. This includes iteration methods like # #each. class Array < ::Array include JRuby::Synchronized end # A thread-safe subclass of Hash. This version locks # against the object itself for every method call, # ensuring only one thread can be reading or writing # at a time. This includes iteration methods like # #each. class Hash < ::Hash include JRuby::Synchronized end elsif !defined?(RUBY_ENGINE) || RUBY_ENGINE == 'ruby' # Because MRI never runs code in parallel, the existing # non-thread-safe structures should usually work fine. Array = ::Array Hash = ::Hash elsif defined?(RUBY_ENGINE) && RUBY_ENGINE == 'rbx' require 'monitor' class Hash < ::Hash; end class Array < ::Array; end [Hash, Array].each do |klass| klass.class_eval do private def _mon_initialize @_monitor = Monitor.new unless @_monitor # avoid double initialisation end def self.allocate obj = super obj.send(:_mon_initialize) obj end end klass.superclass.instance_methods(false).each do |method| klass.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{method}(*args) @_monitor.synchronize { super } end RUBY_EVAL end end end end thread_safe-0.3.5/lib/thread_safe/ 0000755 0000041 0000041 00000000000 12530443654 017035 5 ustar www-data www-data thread_safe-0.3.5/lib/thread_safe/cache.rb 0000644 0000041 0000041 00000011006 12530443654 020423 0 ustar www-data www-data require 'thread' module ThreadSafe autoload :JRubyCacheBackend, 'thread_safe/jruby_cache_backend' autoload :MriCacheBackend, 'thread_safe/mri_cache_backend' autoload :NonConcurrentCacheBackend, 'thread_safe/non_concurrent_cache_backend' autoload :AtomicReferenceCacheBackend, 'thread_safe/atomic_reference_cache_backend' autoload :SynchronizedCacheBackend, 
'thread_safe/synchronized_cache_backend' ConcurrentCacheBackend = if defined?(RUBY_ENGINE) case RUBY_ENGINE when 'jruby'; JRubyCacheBackend when 'ruby'; MriCacheBackend when 'rbx'; AtomicReferenceCacheBackend else warn 'ThreadSafe: unsupported Ruby engine, using a fully synchronized ThreadSafe::Cache implementation' if $VERBOSE SynchronizedCacheBackend end else MriCacheBackend end class Cache < ConcurrentCacheBackend KEY_ERROR = defined?(KeyError) ? KeyError : IndexError # there is no KeyError in 1.8 mode def initialize(options = nil, &block) if options.kind_of?(::Hash) validate_options_hash!(options) else options = nil end super(options) @default_proc = block end def [](key) if value = super # non-falsy value is an existing mapping, return it right away value # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value # would be returned) # note: nil == value check is not technically necessary elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) @default_proc.call(self, key) else value end end alias_method :get, :[] alias_method :put, :[]= def fetch(key, default_value = NULL) if NULL != (value = get_or_default(key, NULL)) value elsif block_given? yield key elsif NULL != default_value default_value else raise_fetch_no_key end end def fetch_or_store(key, default_value = NULL) fetch(key) do put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) end end def put_if_absent(key, value) computed = false result = compute_if_absent(key) do computed = true value end computed ? 
nil : result end unless method_defined?(:put_if_absent) def value?(value) each_value do |v| return true if value.equal?(v) end false end unless method_defined?(:value?) def keys arr = [] each_pair {|k, v| arr << k} arr end unless method_defined?(:keys) def values arr = [] each_pair {|k, v| arr << v} arr end unless method_defined?(:values) def each_key each_pair {|k, v| yield k} end unless method_defined?(:each_key) def each_value each_pair {|k, v| yield v} end unless method_defined?(:each_value) def key(value) each_pair {|k, v| return k if v == value} nil end unless method_defined?(:key) alias_method :index, :key if RUBY_VERSION < '1.9' def empty? each_pair {|k, v| return false} true end unless method_defined?(:empty?) def size count = 0 each_pair {|k, v| count += 1} count end unless method_defined?(:size) def marshal_dump raise TypeError, "can't dump hash with default proc" if @default_proc h = {} each_pair {|k, v| h[k] = v} h end def marshal_load(hash) initialize populate_from(hash) end undef :freeze private def raise_fetch_no_key raise KEY_ERROR, 'key not found' end def initialize_copy(other) super populate_from(other) end def populate_from(hash) hash.each_pair {|k, v| self[k] = v} self end def validate_options_hash!(options) if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Fixnum) || initial_capacity < 0) raise ArgumentError, ":initial_capacity must be a positive Fixnum" end if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) raise ArgumentError, ":load_factor must be a number between 0 and 1" end end end end thread_safe-0.3.5/lib/thread_safe/synchronized_cache_backend.rb 0000644 0000041 0000041 00000002510 12530443654 024671 0 ustar www-data www-data module ThreadSafe class SynchronizedCacheBackend < NonConcurrentCacheBackend require 'mutex_m' include Mutex_m # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are # not allowed to call each 
other. def [](key) synchronize { super } end def []=(key, value) synchronize { super } end def compute_if_absent(key) synchronize { super } end def compute_if_present(key) synchronize { super } end def compute(key) synchronize { super } end def merge_pair(key, value) synchronize { super } end def replace_pair(key, old_value, new_value) synchronize { super } end def replace_if_exists(key, new_value) synchronize { super } end def get_and_set(key, value) synchronize { super } end def key?(key) synchronize { super } end def value?(value) synchronize { super } end def delete(key) synchronize { super } end def delete_pair(key, value) synchronize { super } end def clear synchronize { super } end def size synchronize { super } end def get_or_default(key, default_value) synchronize { super } end private def dupped_backend synchronize { super } end end end thread_safe-0.3.5/lib/thread_safe/non_concurrent_cache_backend.rb 0000644 0000041 0000041 00000005246 12530443654 025217 0 ustar www-data www-data module ThreadSafe class NonConcurrentCacheBackend # WARNING: all public methods of the class must operate on the @backend # directly without calling each other. This is important because of the # SynchronizedCacheBackend which uses a non-reentrant mutex for perfomance # reasons. 
def initialize(options = nil) @backend = {} end def [](key) @backend[key] end def []=(key, value) @backend[key] = value end def compute_if_absent(key) if NULL != (stored_value = @backend.fetch(key, NULL)) stored_value else @backend[key] = yield end end def replace_pair(key, old_value, new_value) if pair?(key, old_value) @backend[key] = new_value true else false end end def replace_if_exists(key, new_value) if NULL != (stored_value = @backend.fetch(key, NULL)) @backend[key] = new_value stored_value end end def compute_if_present(key) if NULL != (stored_value = @backend.fetch(key, NULL)) store_computed_value(key, yield(stored_value)) end end def compute(key) store_computed_value(key, yield(@backend[key])) end def merge_pair(key, value) if NULL == (stored_value = @backend.fetch(key, NULL)) @backend[key] = value else store_computed_value(key, yield(stored_value)) end end def get_and_set(key, value) stored_value = @backend[key] @backend[key] = value stored_value end def key?(key) @backend.key?(key) end def value?(value) @backend.value?(value) end def delete(key) @backend.delete(key) end def delete_pair(key, value) if pair?(key, value) @backend.delete(key) true else false end end def clear @backend.clear self end def each_pair dupped_backend.each_pair do |k, v| yield k, v end self end def size @backend.size end def get_or_default(key, default_value) @backend.fetch(key, default_value) end alias_method :_get, :[] alias_method :_set, :[]= private :_get, :_set private def initialize_copy(other) super @backend = {} self end def dupped_backend @backend.dup end def pair?(key, expected_value) NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) end def store_computed_value(key, new_value) if new_value.nil? 
@backend.delete(key) nil else @backend[key] = new_value end end end end thread_safe-0.3.5/lib/thread_safe/mri_cache_backend.rb 0000644 0000041 0000041 00000004042 12530443654 022743 0 ustar www-data www-data module ThreadSafe class MriCacheBackend < NonConcurrentCacheBackend # We can get away with a single global write lock (instead of a per-instance # one) because of the GVL/green threads. # # The previous implementation used `Thread.critical` on 1.8 MRI to implement # the 4 composed atomic operations (`put_if_absent`, `replace_pair`, # `replace_if_exists`, `delete_pair`) this however doesn't work for # `compute_if_absent` because on 1.8 the Mutex class is itself implemented # via `Thread.critical` and a call to `Mutex#lock` does not restore the # previous `Thread.critical` value (thus any synchronisation clears the # `Thread.critical` flag and we loose control). This poses a problem as the # provided block might use synchronisation on its own. # # NOTE: a neat idea of writing a c-ext to manually perform atomic # put_if_absent, while relying on Ruby not releasing a GVL while calling a # c-ext will not work because of the potentially Ruby implemented `#hash` # and `#eql?` key methods. 
WRITE_LOCK = Mutex.new def []=(key, value) WRITE_LOCK.synchronize { super } end def compute_if_absent(key) if stored_value = _get(key) # fast non-blocking path for the most likely case stored_value else WRITE_LOCK.synchronize { super } end end def compute_if_present(key) WRITE_LOCK.synchronize { super } end def compute(key) WRITE_LOCK.synchronize { super } end def merge_pair(key, value) WRITE_LOCK.synchronize { super } end def replace_pair(key, old_value, new_value) WRITE_LOCK.synchronize { super } end def replace_if_exists(key, new_value) WRITE_LOCK.synchronize { super } end def get_and_set(key, value) WRITE_LOCK.synchronize { super } end def delete(key) WRITE_LOCK.synchronize { super } end def delete_pair(key, value) WRITE_LOCK.synchronize { super } end def clear WRITE_LOCK.synchronize { super } end end end thread_safe-0.3.5/lib/thread_safe/version.rb 0000644 0000041 0000041 00000000754 12530443654 021055 0 ustar www-data www-data module ThreadSafe VERSION = "0.3.5" end # NOTE: <= 0.2.0 used Threadsafe::VERSION # @private module Threadsafe # @private def self.const_missing(name) name = name.to_sym if ThreadSafe.const_defined?(name) warn "[DEPRECATION] `Threadsafe::#{name}' is deprecated, use `ThreadSafe::#{name}' instead." ThreadSafe.const_get(name) else warn "[DEPRECATION] the `Threadsafe' module is deprecated, please use `ThreadSafe` instead." 
super end end end thread_safe-0.3.5/lib/thread_safe/util/ 0000755 0000041 0000041 00000000000 12530443654 020012 5 ustar www-data www-data thread_safe-0.3.5/lib/thread_safe/util/power_of_two_tuple.rb 0000644 0000041 0000041 00000001142 12530443654 024257 0 ustar www-data www-data module ThreadSafe module Util class PowerOfTwoTuple < VolatileTuple def initialize(size) raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 super(size) end def hash_to_index(hash) (size - 1) & hash end def volatile_get_by_hash(hash) volatile_get(hash_to_index(hash)) end def volatile_set_by_hash(hash, value) volatile_set(hash_to_index(hash), value) end def next_in_size_table self.class.new(size << 1) end end end end thread_safe-0.3.5/lib/thread_safe/util/striped64.rb 0000644 0000041 0000041 00000021515 12530443654 022167 0 ustar www-data www-data module ThreadSafe module Util # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 # available in public domain. # # Original source code available here: # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 # # Class holding common representation and mechanics for classes supporting # dynamic striping on 64bit values. # # This class maintains a lazily-initialized table of atomically updated # variables, plus an extra +base+ field. The table size is a power of two. # Indexing uses masked per-thread hash codes. Nearly all methods on this # class are private, accessed directly by subclasses. # # Table entries are of class +Cell+; a variant of AtomicLong padded to # reduce cache contention on most processors. Padding is overkill for most # Atomics because they are usually irregularly scattered in memory and thus # don't interfere much with each other. 
But Atomic objects residing in # arrays will tend to be placed adjacent to each other, and so will most # often share cache lines (with a huge negative performance impact) without # this precaution. # # In part because +Cell+s are relatively large, we avoid creating them until # they are needed. When there is no contention, all updates are made to the # +base+ field. Upon first contention (a failed CAS on +base+ update), the # table is initialized to size 2. The table size is doubled upon further # contention until reaching the nearest power of two greater than or equal # to the number of CPUS. Table slots remain empty (+nil+) until they are # needed. # # A single spinlock (+busy+) is used for initializing and resizing the # table, as well as populating slots with new +Cell+s. There is no need for # a blocking lock: When the lock is not available, threads try other slots # (or the base). During these retries, there is increased contention and # reduced locality, which is still better than alternatives. # # Per-thread hash codes are initialized to random values. Contention and/or # table collisions are indicated by failed CASes when performing an update # operation (see method +retry_update+). Upon a collision, if the table size # is less than the capacity, it is doubled in size unless some other thread # holds the lock. If a hashed slot is empty, and lock is available, a new # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries # proceed by "double hashing", using a secondary hash (XorShift) to try to # find a free slot. # # The table size is capped because, when there are more threads than CPUs, # supposing that each thread were bound to a CPU, there would exist a # perfect hash function mapping threads to slots that eliminates collisions. # When we reach capacity, we search for this mapping by randomly varying the # hash codes of colliding threads. 
Because search is random, and collisions # only become known via CAS failures, convergence can be slow, and because # threads are typically not bound to CPUS forever, may not occur at all. # However, despite these limitations, observed contention rates are # typically low in these cases. # # It is possible for a +Cell+ to become unused when threads that once hashed # to it terminate, as well as in the case where doubling the table causes no # thread to hash to it under expanded mask. We do not try to detect or # remove such cells, under the assumption that for long-running instances, # observed contention levels will recur, so the cells will eventually be # needed again; and for short-lived ones, it does not matter. class Striped64 # Padded variant of AtomicLong supporting only raw accesses plus CAS. # The +value+ field is placed between pads, hoping that the JVM doesn't # reorder them. # # Optimisation note: It would be possible to use a release-only # form of CAS here, if it were provided. class Cell < AtomicReference # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot attr_reader *(Array.new(12).map {|i| :"padding_#{i}"}) alias_method :cas, :compare_and_set def cas_computed cas(current_value = value, yield(current_value)) end end extend Volatile attr_volatile :cells, # Table of cells. When non-null, size is a power of 2. :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. alias_method :busy?, :busy def initialize super() self.busy = false self.base = 0 end # Handles cases of updates involving initialization, resizing, # creating new Cells, and/or contention. See above for # explanation. This method suffers the usual non-modularity # problems of optimistic retry code, relying on rechecked sets of # reads. 
# # Arguments: # [+x+] # the value # [+hash_code+] # hash code used # [+x+] # false if CAS failed before call def retry_update(x, hash_code, was_uncontended) # :yields: current_value hash = hash_code collided = false # True if last slot nonempty while true if current_cells = cells if !(cell = current_cells.volatile_get_by_hash(hash)) if busy? collided = false else # Try to attach new Cell if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell break else redo # Slot is now non-empty end end elsif !was_uncontended # CAS already known to fail was_uncontended = true # Continue after rehash elsif cell.cas_computed {|current_value| yield current_value} break elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale collided = false elsif collided && expand_table_unless_stale(current_cells) collided = false redo # Retry with expanded table else collided = true end hash = XorShiftRandom.xorshift(hash) elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} break end end self.hash_code = hash end private # Static per-thread hash code key. Shared across all instances to # reduce Thread locals pollution and because adjustments due to # collisions in one table are likely to be appropriate for # others. THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym # A thread-local hash code accessor. The code is initially # random, but may be set to a different value upon collisions. def hash_code Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get end def hash_code=(hash) Thread.current[THREAD_LOCAL_KEY] = hash end # Sets base and all +cells+ to the given value. def internal_reset(initial_value) current_cells = cells self.base = initial_value if current_cells current_cells.each do |cell| cell.value = initial_value if cell end end end def cas_base_computed cas_base(current_base = base, yield(current_base)) end def free? !busy? end def try_initialize_cells(x, hash) if free? 
&& !cells try_in_busy do unless cells # Recheck under lock new_cells = PowerOfTwoTuple.new(2) new_cells.volatile_set_by_hash(hash, Cell.new(x)) self.cells = new_cells end end end end def expand_table_unless_stale(current_cells) try_in_busy do if current_cells == cells # Recheck under lock new_cells = current_cells.next_in_size_table current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} self.cells = new_cells end end end def try_to_install_new_cell(new_cell, hash) try_in_busy do # Recheck under lock if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) current_cells.volatile_set(i, new_cell) end end end def try_in_busy if cas_busy(false, true) begin yield ensure self.busy = false end end end end end end thread_safe-0.3.5/lib/thread_safe/util/xor_shift_random.rb 0000644 0000041 0000041 00000002631 12530443654 023706 0 ustar www-data www-data module ThreadSafe module Util # A xorshift random number (positive +Fixnum+s) generator, provides # reasonably cheap way to generate thread local random numbers without # contending for the global +Kernel.rand+. # # Usage: # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed # while true # if (x = XorShiftRandom.xorshift).odd? # thread-localy generate a next random number # do_something_at_random # end # end module XorShiftRandom extend self MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. 
def get Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted end # xorshift based on: http://www.jstatsoft.org/v08/i14/paper if 0.size == 4 # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows def xorshift(x) x ^= x >> 3 x ^= (x << 1) & MAX_INT # cut-off Bignum overflow x ^= x >> 14 end else # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows def xorshift(x) x ^= x >> 1 x ^= (x << 1) & MAX_INT # cut-off Bignum overflow x ^= x >> 54 end end end end end thread_safe-0.3.5/lib/thread_safe/util/atomic_reference.rb 0000644 0000041 0000041 00000002350 12530443654 023631 0 ustar www-data www-data module ThreadSafe module Util AtomicReference = if defined?(Rubinius::AtomicReference) # An overhead-less atomic reference. Rubinius::AtomicReference else begin require 'atomic' defined?(Atomic::InternalReference) ? Atomic::InternalReference : Atomic rescue LoadError, NameError require 'thread' # get Mutex on 1.8 class FullLockingAtomicReference def initialize(value = nil) @___mutex = Mutex.new @___value = value end def get @___mutex.synchronize { @___value } end alias_method :value, :get def set(new_value) @___mutex.synchronize { @___value = new_value } end alias_method :value=, :set def compare_and_set(old_value, new_value) return false unless @___mutex.try_lock begin return false unless @___value.equal? old_value @___value = new_value ensure @___mutex.unlock end true end end FullLockingAtomicReference end end end end thread_safe-0.3.5/lib/thread_safe/util/volatile.rb 0000644 0000041 0000041 00000004045 12530443654 022161 0 ustar www-data www-data module ThreadSafe module Util module Volatile # Provides +volatile+ (in the JVM's sense) attribute accessors implemented # atop of the +AtomicReference+s. 
# # Usage: # class Foo # extend ThreadSafe::Util::Volatile # attr_volatile :foo, :bar # # def initialize(bar) # super() # must super() into parent initializers before using the volatile attribute accessors # self.bar = bar # end # # def hello # my_foo = foo # volatile read # self.foo = 1 # volatile write # cas_foo(1, 2) # => true | a strong CAS # end # end def attr_volatile(*attr_names) return if attr_names.empty? include(Module.new do atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = ThreadSafe::Util::AtomicReference.new"} initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" end class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def initialize(*) super #{atomic_ref_setup.join('; ')} end def initialize_copy(other) super #{initialize_copy_setup.join('; ')} end RUBY_EVAL attr_names.each do |attr_name| class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{attr_name} @__#{attr_name}.get end def #{attr_name}=(value) @__#{attr_name}.set(value) end def compare_and_set_#{attr_name}(old_value, new_value) @__#{attr_name}.compare_and_set(old_value, new_value) end RUBY_EVAL alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" end end) end end end end thread_safe-0.3.5/lib/thread_safe/util/volatile_tuple.rb 0000644 0000041 0000041 00000002005 12530443654 023364 0 ustar www-data www-data module ThreadSafe module Util # A fixed size array with volatile volatile getters/setters. # Usage: # arr = VolatileTuple.new(16) # arr.volatile_set(0, :foo) # arr.volatile_get(0) # => :foo # arr.cas(0, :foo, :bar) # => true # arr.volatile_get(0) # => :bar class VolatileTuple include Enumerable Tuple = defined?(Rubinius::Tuple) ? 
Rubinius::Tuple : Array def initialize(size) @tuple = tuple = Tuple.new(size) i = 0 while i < size tuple[i] = AtomicReference.new i += 1 end end def volatile_get(i) @tuple[i].get end def volatile_set(i, value) @tuple[i].set(value) end def compare_and_set(i, old_value, new_value) @tuple[i].compare_and_set(old_value, new_value) end alias_method :cas, :compare_and_set def size @tuple.size end def each @tuple.each {|ref| yield ref.get} end end end end thread_safe-0.3.5/lib/thread_safe/util/adder.rb 0000644 0000041 0000041 00000004252 12530443654 021421 0 ustar www-data www-data module ThreadSafe module Util # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 # available in public domain. # # Original source code available here: # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 # # One or more variables that together maintain an initially zero # sum. When updates (method +add+) are contended across threads, # the set of variables may grow dynamically to reduce contention. # Method +sum+ returns the current total combined across the # variables maintaining the sum. # # This class is usually preferable to single +Atomic+ reference when # multiple threads update a common sum that is used for purposes such # as collecting statistics, not for fine-grained synchronization # control. Under low update contention, the two classes have similar # characteristics. But under high contention, expected throughput of # this class is significantly higher, at the expense of higher space # consumption. class Adder < Striped64 # Adds the given value. 
# Adds the given value.
def add(x)
  # Fast path: no striped Cell array exists yet and an uncontended CAS on
  # +base+ succeeds; otherwise fall through to the striped slow path.
  if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x}
    was_uncontended = true
    hash = hash_code
    # Try this thread's Cell (selected by hash); on a missing table, missing
    # cell, or a failed CAS, defer to Striped64's full retry/grow loop.
    unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) &&
        (was_uncontended = cell.cas_computed {|current_value| current_value + x})
      retry_update(x, hash, was_uncontended) {|current_value| current_value + x}
    end
  end
end

# Convenience wrapper: adds +1.
def increment
  add(1)
end

# Convenience wrapper: adds -1.
def decrement
  add(-1)
end

# Returns the current sum. The returned value is _NOT_ an
# atomic snapshot: Invocation in the absence of concurrent
# updates returns an accurate result, but concurrent updates that
# occur while the sum is being calculated might not be
# incorporated.
def sum
  # base plus the value of every allocated Cell.
  x = base
  if current_cells = cells
    current_cells.each do |cell|
      x += cell.value if cell
    end
  end
  x
end

# Resets base and all cells back to zero (delegates to Striped64).
def reset
  internal_reset(0)
end
while waiting to # acquire @lock), so pass that signal on to the next waiter waiters.shift << true unless waiters.empty? end end self end def cheap_broadcast waiters = @waiters ||= [] waiters.shift << true until waiters.empty? self end elsif engine == 'jruby' # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects require 'jruby' def cheap_synchronize JRuby.reference0(self).synchronized { yield } end def cheap_wait JRuby.reference0(self).wait end def cheap_broadcast JRuby.reference0(self).notify_all end else require 'thread' extend Volatile attr_volatile :mutex # Non-reentrant Mutex#syncrhonize def cheap_synchronize true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) my_mutex.synchronize { yield } end # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. # Must only be called in +cheap_broadcast+'s block. def cheap_wait conditional_variable = @conditional_variable ||= ConditionVariable.new conditional_variable.wait(mutex) end # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. # Must only be called in +cheap_broadcast+'s block. def cheap_broadcast if conditional_variable = @conditional_variable conditional_variable.broadcast end end end end end end thread_safe-0.3.5/lib/thread_safe/atomic_reference_cache_backend.rb 0000644 0000041 0000041 00000103634 12530443654 025455 0 ustar www-data www-data module ThreadSafe # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 # available in public domain. # # Original source code available here: # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 # # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose # size exceeds a threshold). # # A hash table supporting full concurrency of retrievals and high expected # concurrency for updates. 
However, even though all operations are # thread-safe, retrieval operations do _not_ entail locking, and there is # _not_ any support for locking the entire table in a way that prevents all # access. # # Retrieval operations generally do not block, so may overlap with update # operations. Retrievals reflect the results of the most recently _completed_ # update operations holding upon their onset. (More formally, an update # operation for a given key bears a _happens-before_ relation with any (non # +nil+) retrieval for that key reporting the updated value.) For aggregate # operations such as +clear()+, concurrent retrievals may reflect insertion or # removal of only some entries. Similarly, the +each_pair+ iterator yields # elements reflecting the state of the hash table at some point at or since # the start of the +each_pair+. Bear in mind that the results of aggregate # status methods including +size()+ and +empty?+} are typically useful only # when a map is not undergoing concurrent updates in other threads. Otherwise # the results of these methods reflect transient states that may be adequate # for monitoring or estimation purposes, but not for program control. # # The table is dynamically expanded when there are too many collisions (i.e., # keys that have distinct hash codes but fall into the same slot modulo the # table size), with the expected average effect of maintaining roughly two # bins per mapping (corresponding to a 0.75 load factor threshold for # resizing). There may be much variance around this average as mappings are # added and removed, but overall, this maintains a commonly accepted # time/space tradeoff for hash tables. However, resizing this or any other # kind of hash table may be a relatively slow operation. When possible, it is # a good idea to provide a size estimate as an optional :initial_capacity # initializer argument. 
An additional optional :load_factor constructor # argument provides a further means of customizing initial table capacity by # specifying the table density to be used in calculating the amount of space # to allocate for the given number of elements. Note that using many keys with # exactly the same +hash+ is a sure way to slow down performance of any hash # table. # # ## Design overview # # The primary design goal of this hash table is to maintain concurrent # readability (typically method +[]+, but also iteration and related methods) # while minimizing update contention. Secondary goals are to keep space # consumption about the same or better than plain +Hash+, and to support high # initial insertion rates on an empty table by many threads. # # Each key-value mapping is held in a +Node+. The validation-based approach # explained below leads to a lot of code sprawl because retry-control # precludes factoring into smaller methods. # # The table is lazily initialized to a power-of-two size upon the first # insertion. Each bin in the table normally contains a list of +Node+s (most # often, the list has only zero or one +Node+). Table accesses require # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are # always accurately traversable under volatile reads, so long as lookups check # hash code and non-nullness of value before checking key equality. # # We use the top two bits of +Node+ hash fields for control purposes -- they # are available anyway because of addressing constraints. As explained further # below, these top bits are used as follows: # # - 00 - Normal # - 01 - Locked # - 11 - Locked and may have a thread waiting for lock # - 10 - +Node+ is a forwarding node # # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, # except for forwarding nodes, for which the lower bits are zero (and so # always have hash field == +MOVED+). 
#
# Insertion (via +[]=+ or its variants) of the first node in an empty bin is
# performed by just CASing it to the bin. This is by far the most common case
# for put operations under most key/hash distributions. Other update
# operations (insert, delete, and replace) require locks. We do not want to
# waste the space required to associate a distinct lock object with each bin,
# so instead use the first node of a bin list itself as a lock. Blocking
# support for these locks relies on +Util::CheapLockable+. However, we also
# need a +try_lock+ construction, so we overlay these by using bits of the
# +Node+ hash field for lock control (see above), and so normally use builtin
# monitors only for blocking and signalling using
# +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+.
#
# Using the first node of a list as a lock does not by itself suffice though:
# When a node is locked, any update must first validate that it is still the
# first node after locking it, and retry if not. Because new nodes are always
# appended to lists, once a node is first in a bin, it remains first until
# deleted or the bin becomes invalidated (upon resizing). However, operations
# that only conditionally update may inspect nodes until the point of update.
# This is a converse of sorts to the lazy locking technique described by
# Herlihy & Shavit.
#
# The main disadvantage of per-bin locks is that other update operations on
# other nodes in a bin list protected by the same lock can stall, for example
# when user +eql?+ or mapping functions take a long time. However,
# statistically, under random hash codes, this is not a common problem.
# Ideally, the frequency of nodes in bins follows a Poisson distribution
# (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of
# about 0.5 on average, given the resizing threshold of 0.75, although with a
# large variance because of resizing granularity.
Ignoring variance, the # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / # factorial(k)). The first values are: # # - 0: 0.60653066 # - 1: 0.30326533 # - 2: 0.07581633 # - 3: 0.01263606 # - 4: 0.00157952 # - 5: 0.00015795 # - 6: 0.00001316 # - 7: 0.00000094 # - 8: 0.00000006 # - more: less than 1 in ten million # # Lock contention probability for two threads accessing distinct elements is # roughly 1 / (8 * #elements) under random hashes. # # The table is resized when occupancy exceeds a percentage threshold # (nominally, 0.75, but see below). Only a single thread performs the resize # (using field +size_control+, to arrange exclusion), but the table otherwise # remains usable for reads and updates. Resizing proceeds by transferring # bins, one by one, from the table to the next table. Because we are using # power-of-two expansion, the elements from each bin must either stay at same # index, or move with a power of two offset. We eliminate unnecessary node # creation by catching cases where old nodes can be reused because their next # fields won't change. On average, only about one-sixth of them need cloning # when a table doubles. The nodes they replace will be garbage collectable as # soon as they are no longer referenced by any reader thread that may be in # the midst of concurrently traversing table. Upon transfer, the old table bin # contains only a special forwarding node (with hash field +MOVED+) that # contains the next table as its key. On encountering a forwarding node, # access and update operations restart, using the new table. # # Each bin transfer requires its bin lock. However, unlike other cases, a # transfer can skip a bin if it fails to acquire its lock, and revisit it # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that # have been skipped because of failure to acquire a lock, and blocks only if # none are available (i.e., only very rarely). 
The transfer operation must # also ensure that all accessible bins in both the old and new table are # usable by any traversal. When there are no lock acquisition failures, this # is arranged simply by proceeding from the last bin (+table.size - 1+) up # towards the first. Upon seeing a forwarding node, traversals arrange to move # to the new table without revisiting nodes. However, when any node is skipped # during a transfer, all earlier table bins may have become visible, so are # initialized with a reverse-forwarding node back to the old table until the # new ones are established. (This sometimes requires transiently locking a # forwarding node, which is possible under the above encoding.) These more # expensive mechanics trigger only when necessary. # # The traversal scheme also applies to partial traversals of # ranges of bins (via an alternate Traverser constructor) # to support partitioned aggregate operations. Also, read-only # operations give up if ever forwarded to a null table, which # provides support for shutdown-style clearing, which is also not # currently implemented. # # Lazy table initialization minimizes footprint until first use. # # The element count is maintained using a +ThreadSafe::Util::Adder+, # which avoids contention on updates but can encounter cache thrashing # if read too frequently during concurrent access. To avoid reading so # often, resizing is attempted either when a bin lock is # contended, or upon adding to a bin already holding two or more # nodes (checked before adding in the +x_if_absent+ methods, after # adding in others). Under uniform hash distributions, the # probability of this occurring at threshold is around 13%, # meaning that only about 1 in 8 puts check threshold (and after # resizing, many fewer do so). But this approximation has high # variance for small table sizes, so we check on any collision # for sizes <= 64. 
The bulk putAll operation further reduces # contention by only committing count updates upon these size # checks. class AtomicReferenceCacheBackend class Table < Util::PowerOfTwoTuple def cas_new_node(i, hash, key, value) cas(i, nil, Node.new(hash, key, value)) end def try_to_cas_in_computed(i, hash, key) succeeded = false new_value = nil new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) if cas(i, nil, new_node) begin if NULL == (new_value = yield(NULL)) was_null = true else new_node.value = new_value end succeeded = true ensure volatile_set(i, nil) if !succeeded || was_null new_node.unlock_via_hash(locked_hash, hash) end end return succeeded, new_value end def try_lock_via_hash(i, node, node_hash) node.try_lock_via_hash(node_hash) do yield if volatile_get(i) == node end end def delete_node_at(i, node, predecessor_node) if predecessor_node predecessor_node.next = node.next else volatile_set(i, node.next) end end end # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do # not contain user keys or values. Otherwise, keys are never +nil+, and # +NULL+ +value+ fields indicate that a node is in the process of being # deleted or created. For purposes of read-only access, a key may be read # before a value, but can only be used after checking value to be +!= NULL+. class Node extend Util::Volatile attr_volatile :hash, :value, :next include Util::CheapLockable bit_shift = Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves # Encodings for special uses of Node hash fields. See above for explanation. MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash SPIN_LOCK_ATTEMPTS = Util::CPU_COUNT > 1 ? 
# The user-supplied key; immutable after construction.
attr_reader :key

# hash/value/next are volatile (see attr_volatile above); lazy_set_* is used
# here because the node is not yet published to other threads.
def initialize(hash, key, value, next_node = nil)
  super()
  @key = key
  self.lazy_set_hash(hash)
  self.lazy_set_value(value)
  self.next = next_node
end

# Spins a while if +LOCKED+ bit set and this node is the first of its bin,
# and then sets +WAITING+ bits on hash field and blocks (once) if they are
# still set. It is OK for this method to return even if lock is not
# available upon exit, which enables these simple single-wait mechanics.
#
# The corresponding signalling operation is performed within callers: Upon
# detecting that +WAITING+ has been set when unlocking lock (via a failed
# CAS from non-waiting +LOCKED+ state), unlockers acquire the
# +cheap_synchronize+ lock and perform a +cheap_broadcast+.
def try_await_lock(table, i)
  if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking?
    spins = SPIN_LOCK_ATTEMPTS
    randomizer = base_randomizer = Util::XorShiftRandom.get
    # Keep going only while this node is still the bin head and still locked.
    while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash)
      if spins >= 0
        if (randomizer = (randomizer >> 1)).even? # spin at random
          if (spins -= 1) == 0
            Thread.pass # yield before blocking
          else
            # Reseed the xorshift stream if it ran out of bits.
            randomizer = base_randomizer = Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero?
          end
        end
      elsif cas_hash(my_hash, my_hash | WAITING)
        # Spins exhausted: advertise WAITING and block until broadcast.
        force_aquire_lock(table, i)
        break
      end
    end
  end
end

# True iff +key+ equals this node's key (uses eql?, matching Hash semantics).
def key?(key)
  @key.eql?(key)
end

# True iff this node holds the given key with the given (pure) hash.
def matches?(key, hash)
  pure_hash == hash && key?(key)
end

# The key's hash with the two control bits masked off.
def pure_hash
  hash & HASH_BITS
end

# Attempts to set the LOCKED bit via CAS; on success runs the block and
# always releases the lock afterwards. Returns nil if the lock was not
# acquired (caller retries or waits).
def try_lock_via_hash(node_hash = hash)
  if cas_hash(node_hash, locked_hash = node_hash | LOCKED)
    begin
      yield
    ensure
      unlock_via_hash(locked_hash, node_hash)
    end
  end
end
# True iff this node's hash currently carries the LOCKED bit.
def locked?
  self.class.locked_hash?(hash)
end

# Releases the hash-bit lock. If the CAS from +locked_hash+ back to
# +node_hash+ fails, a waiter set the WAITING bit in the meantime, so force
# the hash back and wake all waiters.
def unlock_via_hash(locked_hash, node_hash)
  unless cas_hash(locked_hash, node_hash)
    self.hash = node_hash
    cheap_synchronize { cheap_broadcast }
  end
end

private
# Blocks (once) until broadcast, but only if this node is still the bin head
# and the WAITING bit is still set; otherwise we may have won a race against
# the signaller, so broadcast onwards instead of waiting forever.
# (Method name spelling "aquire" is historical; callers use it as-is.)
def force_aquire_lock(table, i)
  cheap_synchronize do
    if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING
      cheap_wait
    else
      cheap_broadcast # possibly won race vs signaller
    end
  end
end

class << self
  # True iff the given hash value has the LOCKED bit set.
  def locked_hash?(hash)
    (hash & LOCKED) != 0
  end
end
# Returns the value for +key+, or +else_value+ when absent. Lock-free:
# walks the bin list, hopping to the next table when it meets a forwarding
# node (hash == MOVED, whose key is the new table).
def get_or_default(key, else_value = nil)
  hash = key_hash(key)
  current_table = table
  while current_table
    node = current_table.volatile_get_by_hash(hash)
    # The inner while's +break node.key+ yields the forwarding node's table,
    # which becomes the next +current_table+; falling off the list yields nil.
    current_table =
      while node
        if (node_hash = node.hash) == MOVED
          break node.key
        elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value)
          return value
        end
        node = node.next
      end
  end
  else_value
end

# Hash-style read; nil when absent.
def [](key)
  get_or_default(key)
end

# True iff +key+ is present (distinguishes a stored nil from absence via NULL).
def key?(key)
  get_or_default(key, NULL) != NULL
end

# Hash-style write; returns +value+ per Ruby assignment semantics.
def []=(key, value)
  get_and_set(key, value)
  value
end

# Returns the existing value for +key+, or stores and returns the block's
# result. The block is evaluated at most once, under the bin lock.
def compute_if_absent(key)
  hash = key_hash(key)
  current_table = table || initialize_table
  while true
    if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
      # Empty bin: claim it with a locked placeholder and compute in place.
      succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield }
      if succeeded
        increment_size
        return new_value
      end
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key # follow forwarding node to the new table
    elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS))
      return current_value # pre-scan without locking; fail fast if present
    elsif Node.locked_hash?(node_hash)
      try_await_lock(current_table, i, node)
    else
      succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield }
      return value if succeeded
    end
  end
end

# If +key+ is present, replaces its value with the block's result
# (removing the pair when the block returns nil). Returns the new value,
# or nil when the key was absent or removed.
def compute_if_present(key)
  new_value = nil
  internal_replace(key) do |old_value|
    if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
      NULL
    else
      new_value
    end
  end
  new_value
end

# Computes a new value for +key+ from the current one (nil when absent);
# a nil result removes the mapping. Returns the block's result (or nil).
def compute(key)
  internal_compute(key) do |old_value|
    if (new_value = yield(NULL == old_value ? nil : old_value)).nil?
      NULL
    else
      new_value
    end
  end
end
# Replaces the value for +key+ with +new_value+ only if it currently maps to
# +old_value+. Returns true iff the replacement happened.
def replace_pair(key, old_value, new_value)
  NULL != internal_replace(key, old_value) { new_value }
end

# Replaces the value for +key+ only if some mapping exists. Returns the old
# value, or nil when absent.
def replace_if_exists(key, new_value)
  if (result = internal_replace(key) { new_value }) && NULL != result
    result
  end
end

# Stores +value+ for +key+ and returns the previous value (nil when absent).
def get_and_set(key, value) # internalPut in the original CHMV8
  hash = key_hash(key)
  current_table = table || initialize_table
  while true
    if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
      # Empty bin: a bare CAS of a new node suffices, no lock required.
      if current_table.cas_new_node(i, hash, key, value)
        increment_size
        break
      end
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key # follow forwarding node to the new table
    elsif Node.locked_hash?(node_hash)
      try_await_lock(current_table, i, node)
    else
      succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
      break old_value if succeeded
    end
  end
end

# Removes +key+; returns its former value or nil.
def delete(key)
  replace_if_exists(key, NULL)
end

# Removes +key+ only if it currently maps to +value+. Returns a boolean.
def delete_pair(key, value)
  result = internal_replace(key, value) { NULL }
  if result && NULL != result
    !!result
  else
    false
  end
end

# Yields each (key, value) pair. Reflects some state at or since the start
# of iteration (see class docs); follows forwarding nodes into new tables,
# revisiting the "upper" slots a resize maps bins into.
def each_pair
  return self unless current_table = table
  current_table_size = base_size = current_table.size
  i = base_index = 0
  while base_index < base_size
    if node = current_table.volatile_get(i)
      if node.hash == MOVED
        current_table = node.key
        current_table_size = current_table.size
      else
        begin
          if NULL != (value = node.value) # skip deleted or special nodes
            yield node.key, value
          end
        end while node = node.next
      end
    end
    if (i_with_base = i + base_size) < current_table_size
      i = i_with_base # visit upper slots if present
    else
      i = base_index += 1
    end
  end
  self
end

# Current element count, clamped at zero (see Adder#sum caveats).
def size
  (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values
end

def empty?
  size == 0
end
# Removes all entries. Steps through each bin, nil-ing out node values and
# emptying the bin under its lock; follows forwarding nodes into new tables
# and waits on locked bins. The size counter is decremented opportunistically
# along the way and settled at the end.
def clear
  return self unless current_table = table
  current_table_size = current_table.size
  deleted_count = i = 0
  while i < current_table_size
    if !(node = current_table.volatile_get(i))
      i += 1
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key
      current_table_size = current_table.size
    elsif Node.locked_hash?(node_hash)
      decrement_size(deleted_count) # opportunistically update count
      deleted_count = 0
      node.try_await_lock(current_table, i)
    else
      current_table.try_lock_via_hash(i, node, node_hash) do
        begin
          deleted_count += 1 if NULL != node.value # recheck under lock
          node.value = nil
        end while node = node.next
        current_table.volatile_set(i, nil)
        i += 1
      end
    end
  end
  decrement_size(deleted_count)
  self
end

private
# Internal versions of the insertion methods, each a
# little more complicated than the last. All have
# the same basic structure:
# 1. If table uninitialized, create
# 2. If bin empty, try to CAS new node
# 3. If bin stale, use new table
# 4. Lock and validate; if valid, scan and add or update
#
# The others interweave other checks and/or alternative actions:
# * Plain +get_and_set+ checks for and performs resize after insertion.
# * compute_if_absent prescans for mapping without lock (and fails to add
#   if present), which also makes pre-emptive resize checks worthwhile.
#
# Someday when details settle down a bit more, it might be worth
# some factoring to reduce sprawl.
# Shared retry loop for the replace/delete family. Walks to +key+'s bin and,
# once it holds the bin lock, applies +block+ to the current value via
# attempt_internal_replace. +expected_old_value+ of NULL means "any value".
# Returns the old value, or NULL when no (matching) mapping existed.
def internal_replace(key, expected_old_value = NULL, &block)
  hash = key_hash(key)
  current_table = table
  while current_table
    if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash)))
      break
    elsif (node_hash = node.hash) == MOVED
      current_table = node.key # follow forwarding node
    elsif (node_hash & HASH_BITS) != hash && !node.next # precheck
      break # rules out possible existence
    elsif Node.locked_hash?(node_hash)
      try_await_lock(current_table, i, node)
    else
      succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block)
      return old_value if succeeded
    end
  end
  NULL
end

# One locked attempt of the replace: scans the bin list for +key+; on a
# match (and expected-value match) stores the block's result, deleting the
# node when the block yields NULL. Returns [true, old_value] when the lock
# was held and the scan completed; a falsy result means "retry".
def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash)
  current_table.try_lock_via_hash(i, node, node_hash) do
    predecessor_node = nil
    old_value = NULL
    begin
      if node.matches?(key, hash) && NULL != (current_value = node.value)
        if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value
          old_value = current_value
          if NULL == (node.value = yield(old_value))
            current_table.delete_node_at(i, node, predecessor_node)
            decrement_size
          end
        end
        break
      end
      predecessor_node = node
    end while node = node.next
    return true, old_value
  end
end

# Lock-free scan of a bin list for +key+. Returns the value or NULL.
# When the list has two or more nodes, triggers a resize check on the way
# out (long chains are the resize heuristic, see class docs).
def find_value_in_node_list(node, key, hash, pure_hash)
  do_check_for_resize = false
  while true
    if pure_hash == hash && node.key?(key) && NULL != (value = node.value)
      return value
    elsif node = node.next
      do_check_for_resize = true # at least 2 nodes -> check for resize
      pure_hash = node.pure_hash
    else
      return NULL
    end
  end
ensure
  check_for_resize if do_check_for_resize
end
# One locked attempt of compute_if_absent's slow path: scans the bin for
# +key+; if found returns [true, existing_value] without calling the block,
# otherwise appends a new node with the block's result. Falsy result means
# the lock was lost — caller retries.
def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash)
  added = false
  current_table.try_lock_via_hash(i, node, node_hash) do
    while true
      if node.matches?(key, hash) && NULL != (value = node.value)
        return true, value
      end
      last = node
      unless node = node.next
        last.next = Node.new(hash, key, value = yield)
        added = true
        increment_size
        return true, value
      end
    end
  end
ensure
  check_for_resize if added
end

# One locked attempt of +compute+: scans the bin for +key+; on a match
# replaces the value with the block's result (NULL result deletes the node);
# when absent, appends a new node unless the block yields NULL. Returns
# [true, result_value] when the attempt completed; falsy means retry.
def attempt_compute(key, hash, current_table, i, node, node_hash)
  added = false
  current_table.try_lock_via_hash(i, node, node_hash) do
    predecessor_node = nil
    while true
      if node.matches?(key, hash) && NULL != (value = node.value)
        if NULL == (node.value = value = yield(value))
          current_table.delete_node_at(i, node, predecessor_node)
          decrement_size
          value = nil
        end
        return true, value
      end
      predecessor_node = node
      unless node = node.next
        if NULL == (value = yield(NULL))
          value = nil
        else
          predecessor_node.next = Node.new(hash, key, value)
          added = true
          increment_size
        end
        return true, value
      end
    end
  end
ensure
  check_for_resize if added
end

# One locked attempt of get_and_set's slow path: scans the bin for +key+,
# overwriting on a match or appending otherwise. Returns [true, old_value]
# on a hit, true on a fresh insert; falsy means retry. +node_nesting+ counts
# list length so long chains (or small tables) trigger a resize check.
def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash)
  node_nesting = nil
  current_table.try_lock_via_hash(i, node, node_hash) do
    node_nesting = 1
    old_value = nil
    found_old_value = false
    while node
      if node.matches?(key, hash) && NULL != (old_value = node.value)
        found_old_value = true
        node.value = value
        break
      end
      last = node
      unless node = node.next
        last.next = Node.new(hash, key, value)
        break
      end
      node_nesting += 1
    end
    return true, old_value if found_old_value
    increment_size
    true
  end
ensure
  check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64)
end
# Resize-aware wait: before blocking on a locked bin head, try to help by
# resizing (lock contention is one of the resize heuristics).
def try_await_lock(current_table, i, node)
  check_for_resize # try resizing if can't get lock
  node.try_await_lock(current_table, i)
end

# Maps a user key to its node hash (control bits masked off).
def key_hash(key)
  key.hash & HASH_BITS
end

# Returns a power of two table size for the given desired capacity.
def table_size_for(entry_count)
  size = 2
  size <<= 1 while size < entry_count
  size
end

# Initializes table, using the size recorded in +size_control+.
# Loops until some thread (possibly this one) has installed the table;
# +size_control+ == NOW_RESIZING means another thread is initializing.
def initialize_table
  until current_table ||= table
    if (size_ctrl = size_control) == NOW_RESIZING
      Thread.pass # lost initialization race; just spin
    else
      try_in_resize_lock(current_table, size_ctrl) do
        initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY
        current_table = self.table = Table.new(initial_size)
        initial_size - (initial_size >> 2) # 75% load factor
      end
    end
  end
  current_table
end

# If table is too small and not already resizing, creates next table and
# transfers bins. Rechecks occupancy after a transfer to see if another
# resize is already needed because resizings are lagging additions.
def check_for_resize
  while (current_table = table) &&
      MAX_CAPACITY > (table_size = current_table.size) &&
      NOW_RESIZING != (size_ctrl = size_control) &&
      size_ctrl < @counter.sum
    try_in_resize_lock(current_table, size_ctrl) do
      self.table = rebuild(current_table)
      (table_size << 1) - (table_size >> 1) # 75% load factor
    end
  end
end

# Claims the resize lock by CASing +size_control+ to NOW_RESIZING; if the
# table is still the one we saw, runs the block (which returns the next
# resize threshold). Always restores/installs +size_control+ on the way out.
def try_in_resize_lock(current_table, size_ctrl)
  if cas_size_control(size_ctrl, NOW_RESIZING)
    begin
      if current_table == table # recheck under lock
        size_ctrl = yield # get new size_control
      end
    ensure
      self.size_control = size_ctrl
    end
  end
end

# Moves and/or copies the nodes in each bin to new table. See above for explanation.
def rebuild(table) old_table_size = table.size new_table = table.next_in_size_table # puts "#{old_table_size} -> #{new_table.size}" forwarder = Node.new(MOVED, new_table, NULL) rev_forwarder = nil locked_indexes = nil # holds bins to revisit; nil until needed locked_arr_idx = 0 bin = old_table_size - 1 i = bin while true if !(node = table.volatile_get(i)) # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) elsif Node.locked_hash?(node_hash = node.hash) locked_indexes ||= Array.new if bin < 0 && locked_arr_idx > 0 locked_arr_idx -= 1 i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin redo end if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE node.try_await_lock(table, i) # no other options -- block redo end rev_forwarder ||= Node.new(MOVED, table, NULL) redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list locked_indexes << i new_table.volatile_set(i, rev_forwarder) new_table.volatile_set(i + old_table_size, rev_forwarder) else redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) end if bin > 0 i = (bin -= 1) elsif locked_indexes && !locked_indexes.empty? 
bin = -1 i = locked_indexes.pop locked_arr_idx = locked_indexes.size - 1 else return new_table end end end def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) # transiently use a locked forwarding node locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) if old_table.cas(i, nil, locked_forwarder) new_table.volatile_set(i, nil) # kill the potential reverse forwarders new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders old_table.volatile_set(i, forwarder) locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) true end end # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. def split_old_bin(table, new_table, i, node, node_hash, forwarder) table.try_lock_via_hash(i, node, node_hash) do split_bin(new_table, i, node, node_hash) table.volatile_set(i, forwarder) end end def split_bin(new_table, i, node, node_hash) bit = new_table.size >> 1 # bit to split on run_bit = node_hash & bit last_run = nil low = nil high = nil current_node = node # this optimises for the lowest amount of volatile writes and objects created while current_node = current_node.next unless (b = current_node.hash & bit) == run_bit run_bit = b last_run = current_node end end if run_bit == 0 low = last_run else high = last_run end current_node = node until current_node == last_run pure_hash = current_node.pure_hash if (pure_hash & bit) == 0 low = Node.new(pure_hash, current_node.key, current_node.value, low) else high = Node.new(pure_hash, current_node.key, current_node.value, high) end current_node = current_node.next end new_table.volatile_set(i, low) new_table.volatile_set(i + bit, high) end def increment_size @counter.increment end def decrement_size(by = 1) @counter.add(-by) end end end thread_safe-0.3.5/lib/thread_safe/util.rb 0000644 0000041 0000041 00000001274 12530443654 020343 0 ustar www-data www-data module ThreadSafe module Util FIXNUM_BIT_SIZE = 
(0.size * 8) - 2 MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 CPU_COUNT = 16 # is there a way to determine this? autoload :AtomicReference, 'thread_safe/util/atomic_reference' autoload :Adder, 'thread_safe/util/adder' autoload :CheapLockable, 'thread_safe/util/cheap_lockable' autoload :PowerOfTwoTuple, 'thread_safe/util/power_of_two_tuple' autoload :Striped64, 'thread_safe/util/striped64' autoload :Volatile, 'thread_safe/util/volatile' autoload :VolatileTuple, 'thread_safe/util/volatile_tuple' autoload :XorShiftRandom, 'thread_safe/util/xor_shift_random' end end thread_safe-0.3.5/lib/thread_safe/synchronized_delegator.rb 0000644 0000041 0000041 00000003152 12530443654 024130 0 ustar www-data www-data require 'delegate' require 'monitor' # This class provides a trivial way to synchronize all calls to a given object # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls # around the delegated `#send`. Example: # # array = [] # not thread-safe on many impls # array = SynchronizedDelegator.new([]) # thread-safe # # A simple `Monitor` provides a very coarse-grained way to synchronize a given # object, in that it will cause synchronization for methods that have no need # for it, but this is a trivial way to get thread-safety where none may exist # currently on some implementations. # # This class is currently being considered for inclusion into stdlib, via # https://bugs.ruby-lang.org/issues/8556 class SynchronizedDelegator < SimpleDelegator def setup @old_abort = Thread.abort_on_exception Thread.abort_on_exception = true end def teardown Thread.abort_on_exception = @old_abort end def initialize(obj) __setobj__(obj) @monitor = Monitor.new end def method_missing(method, *args, &block) monitor = @monitor begin monitor.enter super ensure monitor.exit end end # Work-around for 1.8 std-lib not passing block around to delegate. 
# @private def method_missing(method, *args, &block) monitor = @monitor begin monitor.enter target = self.__getobj__ if target.respond_to?(method) target.__send__(method, *args, &block) else super(method, *args, &block) end ensure monitor.exit end end if RUBY_VERSION[0, 3] == '1.8' end unless defined?(SynchronizedDelegator) thread_safe-0.3.5/metadata.yml 0000644 0000041 0000041 00000007725 12530443654 016340 0 ustar www-data www-data --- !ruby/object:Gem::Specification name: thread_safe version: !ruby/object:Gem::Version version: 0.3.5 platform: ruby authors: - Charles Oliver Nutter - thedarkone autorequire: bindir: bin cert_chain: [] date: 2015-03-11 00:00:00.000000000 Z dependencies: - !ruby/object:Gem::Dependency name: atomic requirement: !ruby/object:Gem::Requirement requirements: - - '=' - !ruby/object:Gem::Version version: 1.1.16 type: :development prerelease: false version_requirements: !ruby/object:Gem::Requirement requirements: - - '=' - !ruby/object:Gem::Version version: 1.1.16 - !ruby/object:Gem::Dependency name: rake requirement: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' type: :development prerelease: false version_requirements: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' - !ruby/object:Gem::Dependency name: minitest requirement: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '4' type: :development prerelease: false version_requirements: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '4' description: Thread-safe collections and utilities for Ruby email: - headius@headius.com - thedarkone2@gmail.com executables: [] extensions: [] extra_rdoc_files: [] files: - ".travis.yml" - ".yardopts" - Gemfile - LICENSE - README.md - Rakefile - examples/bench_cache.rb - ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java - 
ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java - ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java - ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java - ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java - ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java - ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java - ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java - ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java - ext/thread_safe/JrubyCacheBackendService.java - lib/thread_safe.rb - lib/thread_safe/atomic_reference_cache_backend.rb - lib/thread_safe/cache.rb - lib/thread_safe/mri_cache_backend.rb - lib/thread_safe/non_concurrent_cache_backend.rb - lib/thread_safe/synchronized_cache_backend.rb - lib/thread_safe/synchronized_delegator.rb - lib/thread_safe/util.rb - lib/thread_safe/util/adder.rb - lib/thread_safe/util/atomic_reference.rb - lib/thread_safe/util/cheap_lockable.rb - lib/thread_safe/util/power_of_two_tuple.rb - lib/thread_safe/util/striped64.rb - lib/thread_safe/util/volatile.rb - lib/thread_safe/util/volatile_tuple.rb - lib/thread_safe/util/xor_shift_random.rb - lib/thread_safe/version.rb - tasks/update_doc.rake - test/src/thread_safe/SecurityManager.java - test/test_array.rb - test/test_cache.rb - test/test_cache_loops.rb - test/test_hash.rb - test/test_helper.rb - test/test_synchronized_delegator.rb - thread_safe.gemspec - yard-template/default/fulldoc/html/css/common.css - yard-template/default/layout/html/footer.erb homepage: https://github.com/ruby-concurrency/thread_safe licenses: - Apache-2.0 metadata: {} post_install_message: rdoc_options: [] require_paths: - lib required_ruby_version: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' required_rubygems_version: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' requirements: [] rubyforge_project: rubygems_version: 2.4.6 
signing_key: specification_version: 4 summary: A collection of data structures and utilities to make thread-safe programming in Ruby easier test_files: - test/src/thread_safe/SecurityManager.java - test/test_array.rb - test/test_cache.rb - test/test_cache_loops.rb - test/test_hash.rb - test/test_helper.rb - test/test_synchronized_delegator.rb has_rdoc: thread_safe-0.3.5/test/ 0000755 0000041 0000041 00000000000 12530443654 015001 5 ustar www-data www-data thread_safe-0.3.5/test/test_hash.rb 0000644 0000041 0000041 00000000567 12530443654 017320 0 ustar www-data www-data require 'thread_safe' require File.join(File.dirname(__FILE__), "test_helper") class TestHash < Minitest::Test def test_concurrency hsh = ThreadSafe::Hash.new (1..THREADS).map do |i| Thread.new do 1000.times do |j| hsh[i*1000+j] = i hsh[i*1000+j] hsh.delete(i*1000+j) end end end.map(&:join) end end thread_safe-0.3.5/test/test_cache.rb 0000644 0000041 0000041 00000060701 12530443654 017434 0 ustar www-data www-data require 'thread_safe' require 'thread' require File.join(File.dirname(__FILE__), "test_helper") Thread.abort_on_exception = true class TestCache < Minitest::Test def setup @cache = ThreadSafe::Cache.new end def test_concurrency cache = @cache (1..THREADS).map do |i| Thread.new do 1000.times do |j| key = i*1000+j cache[key] = i cache[key] cache.delete(key) end end end.map(&:join) end def test_retrieval assert_size_change 1 do assert_equal nil, @cache[:a] assert_equal nil, @cache.get(:a) @cache[:a] = 1 assert_equal 1, @cache[:a] assert_equal 1, @cache.get(:a) end end def test_put_if_absent with_or_without_default_proc do assert_size_change 1 do assert_equal nil, @cache.put_if_absent(:a, 1) assert_equal 1, @cache.put_if_absent(:a, 1) assert_equal 1, @cache.put_if_absent(:a, 2) assert_equal 1, @cache[:a] end end end def test_compute_if_absent with_or_without_default_proc do assert_size_change 3 do assert_equal(1, (@cache.compute_if_absent(:a) {1})) assert_equal(1, (@cache.compute_if_absent(:a) 
{2})) assert_equal 1, @cache[:a] @cache[:b] = nil assert_equal(nil, (@cache.compute_if_absent(:b) {1})) assert_equal(nil, (@cache.compute_if_absent(:c) {})) assert_equal nil, @cache[:c] assert_equal true, @cache.key?(:c) end end end def test_compute_if_absent_with_return with_or_without_default_proc { assert_handles_return_lambda(:compute_if_absent, :a) } end def test_compute_if_absent_exception with_or_without_default_proc { assert_handles_exception(:compute_if_absent, :a) } end def test_compute_if_absent_atomicity late_compute_threads_count = 10 late_put_if_absent_threads_count = 10 getter_threads_count = 5 compute_started = ThreadSafe::Test::Latch.new(1) compute_proceed = ThreadSafe::Test::Latch.new(late_compute_threads_count + late_put_if_absent_threads_count + getter_threads_count) block_until_compute_started = lambda do |name| if (v = @cache[:a]) != nil assert_equal nil, v end compute_proceed.release compute_started.await end assert_size_change 1 do late_compute_threads = Array.new(late_compute_threads_count) do Thread.new do block_until_compute_started.call('compute_if_absent') assert_equal(1, (@cache.compute_if_absent(:a) { flunk })) end end late_put_if_absent_threads = Array.new(late_put_if_absent_threads_count) do Thread.new do block_until_compute_started.call('put_if_absent') assert_equal(1, @cache.put_if_absent(:a, 2)) end end getter_threads = Array.new(getter_threads_count) do Thread.new do block_until_compute_started.call('getter') Thread.pass while @cache[:a].nil? 
assert_equal 1, @cache[:a] end end Thread.new do @cache.compute_if_absent(:a) do compute_started.release compute_proceed.await sleep(0.2) 1 end end.join (late_compute_threads + late_put_if_absent_threads + getter_threads).each(&:join) end end def test_compute_if_present with_or_without_default_proc do assert_no_size_change do assert_equal(nil, @cache.compute_if_present(:a) {}) assert_equal(nil, @cache.compute_if_present(:a) {1}) assert_equal(nil, @cache.compute_if_present(:a) {flunk}) assert_equal false, @cache.key?(:a) end @cache[:a] = 1 assert_no_size_change do assert_equal(1, @cache.compute_if_present(:a) {1}) assert_equal(1, @cache[:a]) assert_equal(2, @cache.compute_if_present(:a) {2}) assert_equal(2, @cache[:a]) assert_equal(false, @cache.compute_if_present(:a) {false}) assert_equal(false, @cache[:a]) @cache[:a] = 1 yielded = false @cache.compute_if_present(:a) do |old_value| yielded = true assert_equal 1, old_value 2 end assert yielded end assert_size_change -1 do assert_equal(nil, @cache.compute_if_present(:a) {}) assert_equal(false, @cache.key?(:a)) assert_equal(nil, @cache.compute_if_present(:a) {1}) assert_equal(false, @cache.key?(:a)) end end end def test_compute_if_present_with_return with_or_without_default_proc do @cache[:a] = 1 assert_handles_return_lambda(:compute_if_present, :a) end end def test_compute_if_present_exception with_or_without_default_proc do @cache[:a] = 1 assert_handles_exception(:compute_if_present, :a) end end def test_compute with_or_without_default_proc do assert_no_size_change do assert_compute(:a, nil, nil) {} end assert_size_change 1 do assert_compute(:a, nil, 1) {1} assert_compute(:a, 1, 2) {2} assert_compute(:a, 2, false) {false} assert_equal false, @cache[:a] end assert_size_change -1 do assert_compute(:a, false, nil) {} end end end def test_compute_with_return with_or_without_default_proc do assert_handles_return_lambda(:compute, :a) @cache[:a] = 1 assert_handles_return_lambda(:compute, :a) end end def 
test_compute_exception with_or_without_default_proc do assert_handles_exception(:compute, :a) @cache[:a] = 1 assert_handles_exception(:compute, :a) end end def test_merge_pair with_or_without_default_proc do assert_size_change 1 do assert_equal(nil, @cache.merge_pair(:a, nil) {flunk}) assert_equal true, @cache.key?(:a) assert_equal nil, @cache[:a] end assert_no_size_change do assert_merge_pair(:a, nil, nil, false) {false} assert_merge_pair(:a, nil, false, 1) {1} assert_merge_pair(:a, nil, 1, 2) {2} end assert_size_change -1 do assert_merge_pair(:a, nil, 2, nil) {} assert_equal false, @cache.key?(:a) end end end def test_merge_pair_with_return with_or_without_default_proc do @cache[:a] = 1 assert_handles_return_lambda(:merge_pair, :a, 2) end end def test_merge_pair_exception with_or_without_default_proc do @cache[:a] = 1 assert_handles_exception(:merge_pair, :a, 2) end end def test_updates_dont_block_reads getters_count = 20 key_klass = ThreadSafe::Test::HashCollisionKey keys = [key_klass.new(1, 100), key_klass.new(2, 100), key_klass.new(3, 100)] # hash colliding keys inserted_keys = [] keys.each do |key, i| compute_started = ThreadSafe::Test::Latch.new(1) compute_finished = ThreadSafe::Test::Latch.new(1) getters_started = ThreadSafe::Test::Latch.new(getters_count) getters_finished = ThreadSafe::Test::Latch.new(getters_count) computer_thread = Thread.new do getters_started.await @cache.compute_if_absent(key) do compute_started.release getters_finished.await 1 end compute_finished.release end getter_threads = (1..getters_count).map do Thread.new do getters_started.release inserted_keys.each do |inserted_key| assert_equal true, @cache.key?(inserted_key) assert_equal 1, @cache[inserted_key] end assert_equal false, @cache.key?(key) compute_started.await inserted_keys.each do |inserted_key| assert_equal true, @cache.key?(inserted_key) assert_equal 1, @cache[inserted_key] end assert_equal false, @cache.key?(key) assert_equal nil, @cache[key] getters_finished.release 
compute_finished.await assert_equal true, @cache.key?(key) assert_equal 1, @cache[key] end end (getter_threads << computer_thread).map {|t| assert(t.join(2))} # asserting no deadlocks inserted_keys << key end end def test_collision_resistance assert_collision_resistance((0..1000).map {|i| ThreadSafe::Test::HashCollisionKey(i, 1)}) end def test_collision_resistance_with_arrays special_array_class = Class.new(Array) do def key # assert_collision_resistance expects to be able to call .key to get the "real" key first.key end end # Test collision resistance with a keys that say they responds_to <=>, but then raise exceptions # when actually called (ie: an Array filled with non-comparable keys). # See https://github.com/headius/thread_safe/issues/19 for more info. assert_collision_resistance((0..100).map do |i| special_array_class.new([ThreadSafe::Test::HashCollisionKeyNonComparable.new(i, 1)]) end) end def test_replace_pair with_or_without_default_proc do assert_no_size_change do assert_equal false, @cache.replace_pair(:a, 1, 2) assert_equal false, @cache.replace_pair(:a, nil, nil) assert_equal false, @cache.key?(:a) end @cache[:a] = 1 assert_no_size_change do assert_equal true, @cache.replace_pair(:a, 1, 2) assert_equal false, @cache.replace_pair(:a, 1, 2) assert_equal 2, @cache[:a] assert_equal true, @cache.replace_pair(:a, 2, 2) assert_equal 2, @cache[:a] assert_equal true, @cache.replace_pair(:a, 2, nil) assert_equal false, @cache.replace_pair(:a, 2, nil) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) assert_equal true, @cache.replace_pair(:a, nil, nil) assert_equal true, @cache.key?(:a) assert_equal true, @cache.replace_pair(:a, nil, 1) assert_equal 1, @cache[:a] end end end def test_replace_if_exists with_or_without_default_proc do assert_no_size_change do assert_equal nil, @cache.replace_if_exists(:a, 1) assert_equal false, @cache.key?(:a) end @cache[:a] = 1 assert_no_size_change do assert_equal 1, @cache.replace_if_exists(:a, 2) assert_equal 2, 
@cache[:a] assert_equal 2, @cache.replace_if_exists(:a, nil) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) assert_equal nil, @cache.replace_if_exists(:a, 1) assert_equal 1, @cache[:a] end end end def test_get_and_set with_or_without_default_proc do assert_size_change 1 do assert_equal nil, @cache.get_and_set(:a, 1) assert_equal true, @cache.key?(:a) assert_equal 1, @cache[:a] assert_equal 1, @cache.get_and_set(:a, 2) assert_equal 2, @cache.get_and_set(:a, nil) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) assert_equal nil, @cache.get_and_set(:a, 1) assert_equal 1, @cache[:a] end end end def test_key with_or_without_default_proc do assert_equal nil, @cache.key(1) @cache[:a] = 1 assert_equal :a, @cache.key(1) assert_equal nil, @cache.key(0) assert_equal :a, @cache.index(1) if RUBY_VERSION =~ /1\.8/ end end def test_key? with_or_without_default_proc do assert_equal false, @cache.key?(:a) @cache[:a] = 1 assert_equal true, @cache.key?(:a) end end def test_value? 
with_or_without_default_proc do assert_equal false, @cache.value?(1) @cache[:a] = 1 assert_equal true, @cache.value?(1) end end def test_delete with_or_without_default_proc do |default_proc_set| assert_no_size_change do assert_equal nil, @cache.delete(:a) end @cache[:a] = 1 assert_size_change -1 do assert_equal 1, @cache.delete(:a) end assert_no_size_change do assert_equal nil, @cache[:a] unless default_proc_set assert_equal false, @cache.key?(:a) assert_equal nil, @cache.delete(:a) end end end def test_delete_pair with_or_without_default_proc do assert_no_size_change do assert_equal false, @cache.delete_pair(:a, 2) assert_equal false, @cache.delete_pair(:a, nil) end @cache[:a] = 1 assert_no_size_change do assert_equal false, @cache.delete_pair(:a, 2) end assert_size_change -1 do assert_equal 1, @cache[:a] assert_equal true, @cache.delete_pair(:a, 1) assert_equal false, @cache.delete_pair(:a, 1) assert_equal false, @cache.key?(:a) end end end def test_default_proc @cache = cache_with_default_proc(1) assert_no_size_change do assert_equal false, @cache.key?(:a) end assert_size_change 1 do assert_equal 1, @cache[:a] assert_equal true, @cache.key?(:a) end end def test_falsy_default_proc @cache = cache_with_default_proc(nil) assert_no_size_change do assert_equal false, @cache.key?(:a) end assert_size_change 1 do assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) end end def test_fetch with_or_without_default_proc do |default_proc_set| assert_no_size_change do assert_equal 1, @cache.fetch(:a, 1) assert_equal(1, (@cache.fetch(:a) {1})) assert_equal false, @cache.key?(:a) assert_equal nil, @cache[:a] unless default_proc_set end @cache[:a] = 1 assert_no_size_change do assert_equal(1, (@cache.fetch(:a) {flunk})) end assert_raises(ThreadSafe::Cache::KEY_ERROR) do @cache.fetch(:b) end assert_no_size_change do assert_equal 1, (@cache.fetch(:b, :c) {1}) # assert block supersedes default value argument assert_equal false, @cache.key?(:b) end end end def 
test_falsy_fetch with_or_without_default_proc do assert_equal false, @cache.key?(:a) assert_no_size_change do assert_equal(nil, @cache.fetch(:a, nil)) assert_equal(false, @cache.fetch(:a, false)) assert_equal(nil, (@cache.fetch(:a) {})) assert_equal(false, (@cache.fetch(:a) {false})) end @cache[:a] = nil assert_no_size_change do assert_equal true, @cache.key?(:a) assert_equal(nil, (@cache.fetch(:a) {flunk})) end end end def test_fetch_with_return with_or_without_default_proc do r = lambda do @cache.fetch(:a) { return 10 } end.call assert_no_size_change do assert_equal 10, r assert_equal false, @cache.key?(:a) end end end def test_fetch_or_store with_or_without_default_proc do |default_proc_set| assert_size_change 1 do assert_equal 1, @cache.fetch_or_store(:a, 1) assert_equal 1, @cache[:a] end @cache.delete(:a) assert_size_change 1 do assert_equal 1, (@cache.fetch_or_store(:a) {1}) assert_equal 1, @cache[:a] end assert_no_size_change do assert_equal(1, (@cache.fetch_or_store(:a) {flunk})) end assert_raises(ThreadSafe::Cache::KEY_ERROR) do @cache.fetch_or_store(:b) end assert_size_change 1 do assert_equal 1, (@cache.fetch_or_store(:b, :c) {1}) # assert block supersedes default value argument assert_equal 1, @cache[:b] end end end def test_falsy_fetch_or_store with_or_without_default_proc do assert_equal false, @cache.key?(:a) assert_size_change 1 do assert_equal(nil, @cache.fetch_or_store(:a, nil)) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) end @cache.delete(:a) assert_size_change 1 do assert_equal(false, @cache.fetch_or_store(:a, false)) assert_equal false, @cache[:a] assert_equal true, @cache.key?(:a) end @cache.delete(:a) assert_size_change 1 do assert_equal(nil, (@cache.fetch_or_store(:a) {})) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) end @cache.delete(:a) assert_size_change 1 do assert_equal(false, (@cache.fetch_or_store(:a) {false})) assert_equal false, @cache[:a] assert_equal true, @cache.key?(:a) end @cache[:a] = nil 
assert_no_size_change do assert_equal(nil, (@cache.fetch_or_store(:a) {flunk})) end end end def test_fetch_or_store_with_return with_or_without_default_proc do r = lambda do @cache.fetch_or_store(:a) { return 10 } end.call assert_no_size_change do assert_equal 10, r assert_equal false, @cache.key?(:a) end end end def test_clear @cache[:a] = 1 assert_size_change -1 do assert_equal @cache, @cache.clear assert_equal false, @cache.key?(:a) assert_equal nil, @cache[:a] end end def test_each_pair @cache.each_pair {|k, v| flunk} assert_equal(@cache, (@cache.each_pair {})) @cache[:a] = 1 h = {} @cache.each_pair {|k, v| h[k] = v} assert_equal({:a => 1}, h) @cache[:b] = 2 h = {} @cache.each_pair {|k, v| h[k] = v} assert_equal({:a => 1, :b => 2}, h) end def test_each_pair_iterator @cache[:a] = 1 @cache[:b] = 2 i = 0 r = @cache.each_pair do |k, v| if i == 0 i += 1 next flunk elsif i == 1 break :breaked end end assert_equal :breaked, r end def test_each_pair_allows_modification @cache[:a] = 1 @cache[:b] = 1 @cache[:c] = 1 assert_size_change 1 do @cache.each_pair do |k, v| @cache[:z] = 1 end end end def test_keys assert_equal [], @cache.keys @cache[1] = 1 assert_equal [1], @cache.keys @cache[2] = 2 assert_equal [1, 2], @cache.keys.sort end def test_values assert_equal [], @cache.values @cache[1] = 1 assert_equal [1], @cache.values @cache[2] = 2 assert_equal [1, 2], @cache.values.sort end def test_each_key assert_equal(@cache, (@cache.each_key {flunk})) @cache[1] = 1 arr = [] @cache.each_key {|k| arr << k} assert_equal [1], arr @cache[2] = 2 arr = [] @cache.each_key {|k| arr << k} assert_equal [1, 2], arr.sort end def test_each_value assert_equal(@cache, (@cache.each_value {flunk})) @cache[1] = 1 arr = [] @cache.each_value {|k| arr << k} assert_equal [1], arr @cache[2] = 2 arr = [] @cache.each_value {|k| arr << k} assert_equal [1, 2], arr.sort end def test_empty assert_equal true, @cache.empty? @cache[:a] = 1 assert_equal false, @cache.empty? 
end def test_options_validation assert_valid_options(nil) assert_valid_options({}) assert_valid_options(:foo => :bar) end def test_initial_capacity_options_validation assert_valid_option(:initial_capacity, nil) assert_valid_option(:initial_capacity, 1) assert_invalid_option(:initial_capacity, '') assert_invalid_option(:initial_capacity, 1.0) assert_invalid_option(:initial_capacity, -1) end def test_load_factor_options_validation assert_valid_option(:load_factor, nil) assert_valid_option(:load_factor, 0.01) assert_valid_option(:load_factor, 0.75) assert_valid_option(:load_factor, 1) assert_invalid_option(:load_factor, '') assert_invalid_option(:load_factor, 0) assert_invalid_option(:load_factor, 1.1) assert_invalid_option(:load_factor, 2) assert_invalid_option(:load_factor, -1) end def test_size assert_equal 0, @cache.size @cache[:a] = 1 assert_equal 1, @cache.size @cache[:b] = 1 assert_equal 2, @cache.size @cache.delete(:a) assert_equal 1, @cache.size @cache.delete(:b) assert_equal 0, @cache.size end def test_get_or_default with_or_without_default_proc do assert_equal 1, @cache.get_or_default(:a, 1) assert_equal nil, @cache.get_or_default(:a, nil) assert_equal false, @cache.get_or_default(:a, false) assert_equal false, @cache.key?(:a) @cache[:a] = 1 assert_equal 1, @cache.get_or_default(:a, 2) end end def test_dup_clone [:dup, :clone].each do |meth| cache = cache_with_default_proc(:default_value) cache[:a] = 1 dupped = cache.send(meth) assert_equal 1, dupped[:a] assert_equal 1, dupped.size assert_size_change 1, cache do assert_no_size_change dupped do cache[:b] = 1 end end assert_equal false, dupped.key?(:b) assert_no_size_change cache do assert_size_change -1, dupped do dupped.delete(:a) end end assert_equal false, dupped.key?(:a) assert_equal true, cache.key?(:a) # test default proc assert_size_change 1, cache do assert_no_size_change dupped do assert_equal :default_value, cache[:c] assert_equal false, dupped.key?(:c) end end assert_no_size_change cache do 
assert_size_change 1, dupped do assert_equal :default_value, dupped[:d] assert_equal false, cache.key?(:d) end end end end def test_is_unfreezable assert_raises(NoMethodError) { @cache.freeze } end def test_marshal_dump_load new_cache = Marshal.load(Marshal.dump(@cache)) assert_instance_of ThreadSafe::Cache, new_cache assert_equal 0, new_cache.size @cache[:a] = 1 new_cache = Marshal.load(Marshal.dump(@cache)) assert_equal 1, @cache[:a] assert_equal 1, new_cache.size end def test_marshal_dump_doesnt_work_with_default_proc assert_raises(TypeError) do Marshal.dump(ThreadSafe::Cache.new {}) end end private def with_or_without_default_proc yield false @cache = ThreadSafe::Cache.new {|h, k| h[k] = :default_value} yield true end def cache_with_default_proc(default_value = 1) ThreadSafe::Cache.new {|cache, k| cache[k] = default_value} end def assert_valid_option(option_name, value) assert_valid_options(option_name => value) end def assert_valid_options(options) c = ThreadSafe::Cache.new(options) assert_instance_of ThreadSafe::Cache, c end def assert_invalid_option(option_name, value) assert_invalid_options(option_name => value) end def assert_invalid_options(options) assert_raises(ArgumentError) { ThreadSafe::Cache.new(options) } end def assert_size_change(change, cache = @cache) start = cache.size yield assert_equal change, cache.size - start end def assert_no_size_change(cache = @cache, &block) assert_size_change(0, cache, &block) end def assert_handles_return_lambda(method, key, *args) before_had_key = @cache.key?(key) before_had_value = before_had_key ? 
@cache[key] : nil returning_lambda = lambda do @cache.send(method, key, *args) { return :direct_return } end assert_no_size_change do assert_equal(:direct_return, returning_lambda.call) assert_equal before_had_key, @cache.key?(key) assert_equal before_had_value, @cache[key] if before_had_value end end class TestException < Exception; end def assert_handles_exception(method, key, *args) before_had_key = @cache.key?(key) before_had_value = before_had_key ? @cache[key] : nil assert_no_size_change do assert_raises(TestException) do @cache.send(method, key, *args) { raise TestException, '' } end assert_equal before_had_key, @cache.key?(key) assert_equal before_had_value, @cache[key] if before_had_value end end def assert_compute(key, expected_old_value, expected_result) result = @cache.compute(:a) do |old_value| assert_equal expected_old_value, old_value yield end assert_equal expected_result, result end def assert_merge_pair(key, value, expected_old_value, expected_result) result = @cache.merge_pair(key, value) do |old_value| assert_equal expected_old_value, old_value yield end assert_equal expected_result, result end def assert_collision_resistance(keys) keys.each {|k| @cache[k] = k.key} 10.times do |i| size = keys.size while i < size k = keys[i] assert(k.key == @cache.delete(k) && !@cache.key?(k) && (@cache[k] = k.key; @cache[k] == k.key)) i += 10 end end assert(keys.all? 
{|k| @cache[k] == k.key}) end end thread_safe-0.3.5/test/src/ 0000755 0000041 0000041 00000000000 12530443654 015570 5 ustar www-data www-data thread_safe-0.3.5/test/src/thread_safe/ 0000755 0000041 0000041 00000000000 12530443654 020035 5 ustar www-data www-data thread_safe-0.3.5/test/src/thread_safe/SecurityManager.java 0000644 0000041 0000041 00000000750 12530443654 024004 0 ustar www-data www-data package thread_safe; import java.security.Permission; import java.util.ArrayList; import java.util.List; public class SecurityManager extends java.lang.SecurityManager { private final ListUsages of this class should typically be of the form: * {@code ThreadLocalRandom.current().nextX(...)} (where * {@code X} is {@code Int}, {@code Long}, etc). * When all usages are of this form, it is never possible to * accidently share a {@code ThreadLocalRandom} across multiple threads. * *
This class also provides additional commonly used bounded random
* generation methods.
*
* @since 1.7
* @author Doug Lea
*/
public class ThreadLocalRandom extends Random {
// same constants as Random, but must be redeclared because private
private static final long multiplier = 0x5DEECE66DL;
private static final long addend = 0xBL;
private static final long mask = (1L << 48) - 1;
/**
* The random seed. We can't use super.seed.
*/
private long rnd;
/**
* Initialization flag to permit calls to setSeed to succeed only
* while executing the Random constructor. We can't allow others
* since it would cause setting seed in one part of a program to
* unintentionally impact other usages by the thread.
*/
boolean initialized;
// Padding to help avoid memory contention among seed updates in
// different TLRs in the common case that they are located near
// each other.
private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
/**
* The actual ThreadLocal
*/
private static final ThreadLocal This class is usually preferable to {@link AtomicLong} when
* multiple threads update a common sum that is used for purposes such
* as collecting statistics, not for fine-grained synchronization
* control. Under low update contention, the two classes have similar
* characteristics. But under high contention, expected throughput of
* this class is significantly higher, at the expense of higher space
* consumption.
*
* This class extends {@link Number}, but does not define
* methods such as {@code hashCode} and {@code compareTo} because
* instances are expected to be mutated, and so are not useful as
* collection keys.
*
* jsr166e note: This class is targeted to be placed in
* java.util.concurrent.atomic.
*
* @since 1.8
* @author Doug Lea
*/
public class LongAdder extends Striped64 implements Serializable {
    private static final long serialVersionUID = 7249069246863182397L;
    /**
     * Version of plus for use in retryUpdate.
     *
     * @param v the current value
     * @param x the value to add
     * @return the sum {@code v + x}
     */
    final long fn(long v, long x) { return v + x; }
    /**
     * Creates a new adder with initial sum of zero.
     */
    public LongAdder() {
    }
    /**
     * Adds the given value.
     *
     * @param x the value to add
     */
    public void add(long x) {
        Cell[] as; long b, v; HashCode hc; Cell a; int n;
        // Fast path: no cell table yet and an uncontended CAS on the
        // shared base succeeds. Otherwise fall through to per-thread cells.
        if ((as = cells) != null || !casBase(b = base, b + x)) {
            boolean uncontended = true;
            int h = (hc = threadHashCode.get()).code;
            // Try the cell indexed by this thread's hash; on a missing
            // table/cell or a failed CAS, defer to the slow path in
            // Striped64.retryUpdate, which may grow the table.
            if (as == null || (n = as.length) < 1 ||
                (a = as[(n - 1) & h]) == null ||
                !(uncontended = a.cas(v = a.value, v + x)))
                retryUpdate(x, hc, uncontended);
        }
    }
    /**
     * Equivalent to {@code add(1)}.
     */
    public void increment() {
        add(1L);
    }
    /**
     * Equivalent to {@code add(-1)}.
     */
    public void decrement() {
        add(-1L);
    }
    /**
     * Returns the current sum. The returned value is NOT an
     * atomic snapshot: Invocation in the absence of concurrent
     * updates returns an accurate result, but concurrent updates that
     * occur while the sum is being calculated might not be
     * incorporated.
     *
     * @return the sum
     */
    public long sum() {
        long sum = base;
        Cell[] as = cells;
        if (as != null) {
            int n = as.length;
            for (int i = 0; i < n; ++i) {
                Cell a = as[i];
                if (a != null)
                    sum += a.value;
            }
        }
        return sum;
    }
    /**
     * Resets variables maintaining the sum to zero. This method may
     * be a useful alternative to creating a new adder, but is only
     * effective if there are no concurrent updates. Because this
     * method is intrinsically racy, it should only be used when it is
     * known that no threads are concurrently updating.
     */
    public void reset() {
        internalReset(0L);
    }
    /**
     * Equivalent in effect to {@link #sum} followed by {@link
     * #reset}. This method may apply for example during quiescent
     * points between multithreaded computations. If there are
     * updates concurrent with this method, the returned value is
     * not guaranteed to be the final value occurring before
     * the reset.
     *
     * @return the sum
     */
    public long sumThenReset() {
        long sum = base;
        Cell[] as = cells;
        base = 0L;
        if (as != null) {
            int n = as.length;
            for (int i = 0; i < n; ++i) {
                Cell a = as[i];
                if (a != null) {
                    // Fold in and zero each cell; racy by design (see javadoc).
                    sum += a.value;
                    a.value = 0L;
                }
            }
        }
        return sum;
    }
    /**
     * Returns the String representation of the {@link #sum}.
     * @return the String representation of the {@link #sum}
     */
    public String toString() {
        return Long.toString(sum());
    }
    /**
     * Equivalent to {@link #sum}.
     *
     * @return the sum
     */
    public long longValue() {
        return sum();
    }
    /**
     * Returns the {@link #sum} as an {@code int} after a narrowing
     * primitive conversion.
     */
    public int intValue() {
        return (int)sum();
    }
    /**
     * Returns the {@link #sum} as a {@code float}
     * after a widening primitive conversion.
     */
    public float floatValue() {
        return (float)sum();
    }
    /**
     * Returns the {@link #sum} as a {@code double} after a widening
     * primitive conversion.
     */
    public double doubleValue() {
        return (double)sum();
    }
    // Serializes the combined sum as a single long.
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        s.defaultWriteObject();
        s.writeLong(sum());
    }
    // Fully qualified java.io names, consistent with writeObject: the
    // file imports only java.io.Serializable, so the previously
    // unqualified ObjectInputStream/IOException did not resolve.
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        busy = 0;
        cells = null;
        base = s.readLong();
    }
}
thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java 0000644 0000041 0000041 00000464216 12530443654 031311 0 ustar www-data www-data /*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
// This is based on the 1.79 version.
package org.jruby.ext.thread_safe.jsr166e.nounsafe;
import org.jruby.RubyClass;
import org.jruby.RubyNumeric;
import org.jruby.RubyObject;
import org.jruby.exceptions.RaiseException;
import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap;
import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import java.util.Collection;
import java.util.Hashtable;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Enumeration;
import java.util.ConcurrentModificationException;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import java.io.Serializable;
/**
* A hash table supporting full concurrency of retrievals and
* high expected concurrency for updates. This class obeys the
* same functional specification as {@link java.util.Hashtable}, and
* includes versions of methods corresponding to each method of
* {@code Hashtable}. However, even though all operations are
* thread-safe, retrieval operations do not entail locking,
* and there is not any support for locking the entire table
* in a way that prevents all access. This class is fully
* interoperable with {@code Hashtable} in programs that rely on its
* thread safety but not on its synchronization details.
*
* Retrieval operations (including {@code get}) generally do not
* block, so may overlap with update operations (including {@code put}
* and {@code remove}). Retrievals reflect the results of the most
* recently completed update operations holding upon their
* onset. (More formally, an update operation for a given key bears a
* happens-before relation with any (non-null) retrieval for
* that key reporting the updated value.) For aggregate operations
* such as {@code putAll} and {@code clear}, concurrent retrievals may
* reflect insertion or removal of only some entries. Similarly,
* Iterators and Enumerations return elements reflecting the state of
* the hash table at some point at or since the creation of the
* iterator/enumeration. They do not throw {@link
* ConcurrentModificationException}. However, iterators are designed
* to be used by only one thread at a time. Bear in mind that the
* results of aggregate status methods including {@code size}, {@code
* isEmpty}, and {@code containsValue} are typically useful only when
* a map is not undergoing concurrent updates in other threads.
* Otherwise the results of these methods reflect transient states
* that may be adequate for monitoring or estimation purposes, but not
* for program control.
*
* The table is dynamically expanded when there are too many
* collisions (i.e., keys that have distinct hash codes but fall into
* the same slot modulo the table size), with the expected average
* effect of maintaining roughly two bins per mapping (corresponding
* to a 0.75 load factor threshold for resizing). There may be much
* variance around this average as mappings are added and removed, but
* overall, this maintains a commonly accepted time/space tradeoff for
* hash tables. However, resizing this or any other kind of hash
* table may be a relatively slow operation. When possible, it is a
* good idea to provide a size estimate as an optional {@code
* initialCapacity} constructor argument. An additional optional
* {@code loadFactor} constructor argument provides a further means of
* customizing initial table capacity by specifying the table density
* to be used in calculating the amount of space to allocate for the
* given number of elements. Also, for compatibility with previous
* versions of this class, constructors may optionally specify an
* expected {@code concurrencyLevel} as an additional hint for
* internal sizing. Note that using many keys with exactly the same
* {@code hashCode()} is a sure way to slow down performance of any
* hash table.
*
* A {@link Set} projection of a ConcurrentHashMapV8 may be created
* (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
* (using {@link #keySet(Object)} when only keys are of interest, and the
* mapped values are (perhaps transiently) not used or all take the
* same mapping value.
*
* A ConcurrentHashMapV8 can be used as scalable frequency map (a
* form of histogram or multiset) by using {@link LongAdder} values
* and initializing via {@link #computeIfAbsent}. For example, to add
* a count to a {@code ConcurrentHashMapV8 This class and its views and iterators implement all of the
* optional methods of the {@link Map} and {@link Iterator}
* interfaces.
*
* Like {@link Hashtable} but unlike {@link HashMap}, this class
* does not allow {@code null} to be used as a key or value.
*
* ConcurrentHashMapV8s support parallel operations using the {@link
* ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
* are available in class {@link ForkJoinTasks}). These operations are
* designed to be safely, and often sensibly, applied even with maps
* that are being concurrently updated by other threads; for example,
* when computing a snapshot summary of the values in a shared
* registry. There are three kinds of operation, each with four
* forms, accepting functions with Keys, Values, Entries, and (Key,
* Value) arguments and/or return values. (The first three forms are
* also available via the {@link #keySet()}, {@link #values()} and
* {@link #entrySet()} views). Because the elements of a
* ConcurrentHashMapV8 are not ordered in any particular way, and may be
* processed in different orders in different parallel executions, the
* correctness of supplied functions should not depend on any
* ordering, or on any other objects or values that may transiently
* change while computation is in progress; and except for forEach
* actions, should ideally be side-effect-free.
*
* The concurrency properties of bulk operations follow
* from those of ConcurrentHashMapV8: Any non-null result returned
* from {@code get(key)} and related access methods bears a
* happens-before relation with the associated insertion or
* update. The result of any bulk operation reflects the
* composition of these per-element relations (but is not
* necessarily atomic with respect to the map as a whole unless it
* is somehow known to be quiescent). Conversely, because keys
* and values in the map are never null, null serves as a reliable
* atomic indicator of the current lack of any result. To
* maintain this property, null serves as an implicit basis for
* all non-scalar reduction operations. For the double, long, and
* int versions, the basis should be one that, when combined with
* any other value, returns that other value (more formally, it
* should be the identity element for the reduction). Most common
* reductions have these properties; for example, computing a sum
* with basis 0 or a minimum with basis MAX_VALUE.
*
* Search and transformation functions provided as arguments
* should similarly return null to indicate the lack of any result
* (in which case it is not used). In the case of mapped
* reductions, this also enables transformations to serve as
* filters, returning null (or, in the case of primitive
* specializations, the identity basis) if the element should not
* be combined. You can create compound transformations and
* filterings by composing them yourself under this "null means
* there is nothing there now" rule before using them in search or
* reduce operations.
*
* Methods accepting and/or returning Entry arguments maintain
* key-value associations. They may be useful for example when
* finding the key for the greatest value. Note that "plain" Entry
* arguments can be supplied using {@code new
* AbstractMap.SimpleEntry(k,v)}.
*
* Bulk operations may complete abruptly, throwing an
* exception encountered in the application of a supplied
* function. Bear in mind when handling such exceptions that other
* concurrently executing functions could also have thrown
* exceptions, or would have done so if the first exception had
* not occurred.
*
* Parallel speedups for bulk operations compared to sequential
* processing are common but not guaranteed. Operations involving
* brief functions on small maps may execute more slowly than
* sequential loops if the underlying work to parallelize the
* computation is more expensive than the computation itself.
* Similarly, parallelization may not lead to much actual parallelism
* if all processors are busy performing unrelated tasks.
*
* All arguments to all task methods must be non-null.
*
* jsr166e note: During transition, this class
* uses nested functional interfaces with different names but the
* same forms as those expected for JDK8.
*
* This class is a member of the
*
* Java Collections Framework.
*
* @since 1.5
* @author Doug Lea
* @param This interface exports a subset of expected JDK8
* functionality.
*
* Sample usage: Here is one (of the several) ways to compute
* the sum of the values held in a map using the ForkJoin
* framework. As illustrated here, Spliterators are well suited to
* designs in which a task repeatedly splits off half its work
* into forked subtasks until small enough to process directly,
* and then joins these subtasks. Variants of this style can also
* be used in completion-based designs.
*
* More formally, if this map contains a mapping from a key
* {@code k} to a value {@code v} such that {@code key.equals(k)},
* then this method returns {@code v}; otherwise it returns
* {@code null}. (There can be at most one such mapping.)
*
* @throws NullPointerException if the specified key is null
*/
@SuppressWarnings("unchecked") public V get(Object key) {
    // Null keys are rejected, matching Hashtable semantics.
    if (key == null) {
        throw new NullPointerException();
    }
    Object found = internalGet(key);
    return (V) found;
}
/**
* Returns the value to which the specified key is mapped,
* or the given defaultValue if this map contains no mapping for the key.
*
* @param key the key
* @param defaultValue the value to return if this map contains
* no mapping for the given key
* @return the mapping for the key, if present; else the defaultValue
* @throws NullPointerException if the specified key is null
*/
@SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
    if (key == null) {
        throw new NullPointerException();
    }
    // Values are never null in this map, so a null lookup result
    // unambiguously means "no mapping" and the default is returned.
    Object found = internalGet(key);
    return found == null ? defaultValue : (V) found;
}
/**
* Tests if the specified object is a key in this table.
*
* @param key possible key
* @return {@code true} if and only if the specified object
* is a key in this table, as determined by the
* {@code equals} method; {@code false} otherwise
* @throws NullPointerException if the specified key is null
*/
public boolean containsKey(Object key) {
    if (key == null) {
        throw new NullPointerException();
    }
    // A key is present exactly when a (non-null) value maps to it.
    return null != internalGet(key);
}
/**
* Returns {@code true} if this map maps one or more keys to the
* specified value. Note: This method may require a full traversal
* of the map, and is much slower than method {@code containsKey}.
*
* @param value value whose presence in this map is to be tested
* @return {@code true} if this map maps one or more keys to the
* specified value
* @throws NullPointerException if the specified value is null
*/
public boolean containsValue(Object value) {
if (value == null)
throw new NullPointerException();
Object v;
Traverser The value can be retrieved by calling the {@code get} method
* with a key that is equal to the original key.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return the previous value associated with {@code key}, or
* {@code null} if there was no mapping for {@code key}
* @throws NullPointerException if the specified key or value is null
*/
@SuppressWarnings("unchecked") public V put(K key, V value) {
    // Neither null keys nor null values are permitted.
    if (key == null || value == null) {
        throw new NullPointerException();
    }
    Object previous = internalPut(key, value);
    return (V) previous;
}
/**
* {@inheritDoc}
*
* @return the previous value associated with the specified key,
* or {@code null} if there was no mapping for the key
* @throws NullPointerException if the specified key or value is null
*/
@SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
    // Neither null keys nor null values are permitted.
    if (key == null || value == null) {
        throw new NullPointerException();
    }
    Object previous = internalPutIfAbsent(key, value);
    return (V) previous;
}
/**
* Copies all of the mappings from the specified map to this one.
* These mappings replace any mappings that this map had for any of the
* keys currently in the specified map.
*
* @param m mappings to be stored in this map
*/
public void putAll(Map extends K, ? extends V> m) {
internalPutAll(m);
}
/**
* If the specified key is not already associated with a value,
* computes its value using the given mappingFunction and enters
* it into the map unless null. This is equivalent to
* The view's {@code iterator} is a "weakly consistent" iterator
* that will never throw {@link ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
*/
public Set The view's {@code iterator} is a "weakly consistent" iterator
* that will never throw {@link ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
*/
public static final class ValuesView This class is usually preferable to {@link AtomicLong} when
* multiple threads update a common sum that is used for purposes such
* as collecting statistics, not for fine-grained synchronization
* control. Under low update contention, the two classes have similar
* characteristics. But under high contention, expected throughput of
* this class is significantly higher, at the expense of higher space
* consumption.
*
* This class extends {@link Number}, but does not define
* methods such as {@code hashCode} and {@code compareTo} because
* instances are expected to be mutated, and so are not useful as
* collection keys.
*
* jsr166e note: This class is targeted to be placed in
* java.util.concurrent.atomic.
*
* @since 1.8
* @author Doug Lea
*/
public class LongAdder extends Striped64 implements Serializable {
    private static final long serialVersionUID = 7249069246863182397L;
    /**
     * Version of plus for use in retryUpdate.
     *
     * @param v the current value
     * @param x the value to add
     * @return the sum {@code v + x}
     */
    final long fn(long v, long x) { return v + x; }
    /**
     * Creates a new adder with initial sum of zero.
     */
    public LongAdder() {
    }
    /**
     * Adds the given value.
     *
     * @param x the value to add
     */
    public void add(long x) {
        Cell[] as; long b, v; HashCode hc; Cell a; int n;
        // Fast path: no cell table yet and an uncontended CAS on the
        // shared base succeeds. Otherwise fall through to per-thread cells.
        if ((as = cells) != null || !casBase(b = base, b + x)) {
            boolean uncontended = true;
            int h = (hc = threadHashCode.get()).code;
            // Try the cell indexed by this thread's hash; on a missing
            // table/cell or a failed CAS, defer to the slow path in
            // Striped64.retryUpdate, which may grow the table.
            if (as == null || (n = as.length) < 1 ||
                (a = as[(n - 1) & h]) == null ||
                !(uncontended = a.cas(v = a.value, v + x)))
                retryUpdate(x, hc, uncontended);
        }
    }
    /**
     * Equivalent to {@code add(1)}.
     */
    public void increment() {
        add(1L);
    }
    /**
     * Equivalent to {@code add(-1)}.
     */
    public void decrement() {
        add(-1L);
    }
    /**
     * Returns the current sum. The returned value is NOT an
     * atomic snapshot: Invocation in the absence of concurrent
     * updates returns an accurate result, but concurrent updates that
     * occur while the sum is being calculated might not be
     * incorporated.
     *
     * @return the sum
     */
    public long sum() {
        long sum = base;
        Cell[] as = cells;
        if (as != null) {
            int n = as.length;
            for (int i = 0; i < n; ++i) {
                Cell a = as[i];
                if (a != null)
                    sum += a.value;
            }
        }
        return sum;
    }
    /**
     * Resets variables maintaining the sum to zero. This method may
     * be a useful alternative to creating a new adder, but is only
     * effective if there are no concurrent updates. Because this
     * method is intrinsically racy, it should only be used when it is
     * known that no threads are concurrently updating.
     */
    public void reset() {
        internalReset(0L);
    }
    /**
     * Equivalent in effect to {@link #sum} followed by {@link
     * #reset}. This method may apply for example during quiescent
     * points between multithreaded computations. If there are
     * updates concurrent with this method, the returned value is
     * not guaranteed to be the final value occurring before
     * the reset.
     *
     * @return the sum
     */
    public long sumThenReset() {
        long sum = base;
        Cell[] as = cells;
        base = 0L;
        if (as != null) {
            int n = as.length;
            for (int i = 0; i < n; ++i) {
                Cell a = as[i];
                if (a != null) {
                    // Fold in and zero each cell; racy by design (see javadoc).
                    sum += a.value;
                    a.value = 0L;
                }
            }
        }
        return sum;
    }
    /**
     * Returns the String representation of the {@link #sum}.
     * @return the String representation of the {@link #sum}
     */
    public String toString() {
        return Long.toString(sum());
    }
    /**
     * Equivalent to {@link #sum}.
     *
     * @return the sum
     */
    public long longValue() {
        return sum();
    }
    /**
     * Returns the {@link #sum} as an {@code int} after a narrowing
     * primitive conversion.
     */
    public int intValue() {
        return (int)sum();
    }
    /**
     * Returns the {@link #sum} as a {@code float}
     * after a widening primitive conversion.
     */
    public float floatValue() {
        return (float)sum();
    }
    /**
     * Returns the {@link #sum} as a {@code double} after a widening
     * primitive conversion.
     */
    public double doubleValue() {
        return (double)sum();
    }
    // Serializes the combined sum as a single long.
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        s.defaultWriteObject();
        s.writeLong(sum());
    }
    // Fully qualified java.io names, consistent with writeObject: this
    // file imports only java.io.Serializable from java.io, so the
    // previously unqualified ObjectInputStream/IOException did not resolve.
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();
        busy = 0;
        cells = null;
        base = s.readLong();
    }
}
thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java 0000644 0000041 0000041 00000467110 12530443654 027467 0 ustar www-data www-data /*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
// This is based on the 1.79 version.
package org.jruby.ext.thread_safe.jsr166e;
import org.jruby.RubyClass;
import org.jruby.RubyNumeric;
import org.jruby.RubyObject;
import org.jruby.exceptions.RaiseException;
import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom;
import org.jruby.runtime.ThreadContext;
import org.jruby.runtime.builtin.IRubyObject;
import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import java.util.Collection;
import java.util.Hashtable;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Enumeration;
import java.util.ConcurrentModificationException;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import java.io.Serializable;
/**
* A hash table supporting full concurrency of retrievals and
* high expected concurrency for updates. This class obeys the
* same functional specification as {@link java.util.Hashtable}, and
* includes versions of methods corresponding to each method of
* {@code Hashtable}. However, even though all operations are
* thread-safe, retrieval operations do not entail locking,
* and there is not any support for locking the entire table
* in a way that prevents all access. This class is fully
* interoperable with {@code Hashtable} in programs that rely on its
* thread safety but not on its synchronization details.
*
* Retrieval operations (including {@code get}) generally do not
* block, so may overlap with update operations (including {@code put}
* and {@code remove}). Retrievals reflect the results of the most
* recently completed update operations holding upon their
* onset. (More formally, an update operation for a given key bears a
* happens-before relation with any (non-null) retrieval for
* that key reporting the updated value.) For aggregate operations
* such as {@code putAll} and {@code clear}, concurrent retrievals may
* reflect insertion or removal of only some entries. Similarly,
* Iterators and Enumerations return elements reflecting the state of
* the hash table at some point at or since the creation of the
* iterator/enumeration. They do not throw {@link
* ConcurrentModificationException}. However, iterators are designed
* to be used by only one thread at a time. Bear in mind that the
* results of aggregate status methods including {@code size}, {@code
* isEmpty}, and {@code containsValue} are typically useful only when
* a map is not undergoing concurrent updates in other threads.
* Otherwise the results of these methods reflect transient states
* that may be adequate for monitoring or estimation purposes, but not
* for program control.
*
* The table is dynamically expanded when there are too many
* collisions (i.e., keys that have distinct hash codes but fall into
* the same slot modulo the table size), with the expected average
* effect of maintaining roughly two bins per mapping (corresponding
* to a 0.75 load factor threshold for resizing). There may be much
* variance around this average as mappings are added and removed, but
* overall, this maintains a commonly accepted time/space tradeoff for
* hash tables. However, resizing this or any other kind of hash
* table may be a relatively slow operation. When possible, it is a
* good idea to provide a size estimate as an optional {@code
* initialCapacity} constructor argument. An additional optional
* {@code loadFactor} constructor argument provides a further means of
* customizing initial table capacity by specifying the table density
* to be used in calculating the amount of space to allocate for the
* given number of elements. Also, for compatibility with previous
* versions of this class, constructors may optionally specify an
* expected {@code concurrencyLevel} as an additional hint for
* internal sizing. Note that using many keys with exactly the same
* {@code hashCode()} is a sure way to slow down performance of any
* hash table.
*
* A {@link Set} projection of a ConcurrentHashMapV8 may be created
* (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
* (using {@link #keySet(Object)} when only keys are of interest, and the
* mapped values are (perhaps transiently) not used or all take the
* same mapping value.
*
* A ConcurrentHashMapV8 can be used as scalable frequency map (a
* form of histogram or multiset) by using {@link LongAdder} values
* and initializing via {@link #computeIfAbsent}. For example, to add
* a count to a {@code ConcurrentHashMapV8 This class and its views and iterators implement all of the
* optional methods of the {@link Map} and {@link Iterator}
* interfaces.
*
* Like {@link Hashtable} but unlike {@link HashMap}, this class
* does not allow {@code null} to be used as a key or value.
*
* ConcurrentHashMapV8s support parallel operations using the {@link
* ForkJoinPool#commonPool}. (Tasks that may be used in other contexts
* are available in class {@link ForkJoinTasks}). These operations are
* designed to be safely, and often sensibly, applied even with maps
* that are being concurrently updated by other threads; for example,
* when computing a snapshot summary of the values in a shared
* registry. There are three kinds of operation, each with four
* forms, accepting functions with Keys, Values, Entries, and (Key,
* Value) arguments and/or return values. (The first three forms are
* also available via the {@link #keySet()}, {@link #values()} and
* {@link #entrySet()} views). Because the elements of a
* ConcurrentHashMapV8 are not ordered in any particular way, and may be
* processed in different orders in different parallel executions, the
* correctness of supplied functions should not depend on any
* ordering, or on any other objects or values that may transiently
* change while computation is in progress; and except for forEach
* actions, should ideally be side-effect-free.
*
* The concurrency properties of bulk operations follow
* from those of ConcurrentHashMapV8: Any non-null result returned
* from {@code get(key)} and related access methods bears a
* happens-before relation with the associated insertion or
* update. The result of any bulk operation reflects the
* composition of these per-element relations (but is not
* necessarily atomic with respect to the map as a whole unless it
* is somehow known to be quiescent). Conversely, because keys
* and values in the map are never null, null serves as a reliable
* atomic indicator of the current lack of any result. To
* maintain this property, null serves as an implicit basis for
* all non-scalar reduction operations. For the double, long, and
* int versions, the basis should be one that, when combined with
* any other value, returns that other value (more formally, it
* should be the identity element for the reduction). Most common
* reductions have these properties; for example, computing a sum
* with basis 0 or a minimum with basis MAX_VALUE.
*
* Search and transformation functions provided as arguments
* should similarly return null to indicate the lack of any result
* (in which case it is not used). In the case of mapped
* reductions, this also enables transformations to serve as
* filters, returning null (or, in the case of primitive
* specializations, the identity basis) if the element should not
* be combined. You can create compound transformations and
* filterings by composing them yourself under this "null means
* there is nothing there now" rule before using them in search or
* reduce operations.
*
* Methods accepting and/or returning Entry arguments maintain
* key-value associations. They may be useful for example when
* finding the key for the greatest value. Note that "plain" Entry
* arguments can be supplied using {@code new
* AbstractMap.SimpleEntry(k,v)}.
*
* Bulk operations may complete abruptly, throwing an
* exception encountered in the application of a supplied
* function. Bear in mind when handling such exceptions that other
* concurrently executing functions could also have thrown
* exceptions, or would have done so if the first exception had
* not occurred.
*
* Parallel speedups for bulk operations compared to sequential
* processing are common but not guaranteed. Operations involving
* brief functions on small maps may execute more slowly than
* sequential loops if the underlying work to parallelize the
* computation is more expensive than the computation itself.
* Similarly, parallelization may not lead to much actual parallelism
* if all processors are busy performing unrelated tasks.
*
* All arguments to all task methods must be non-null.
*
* jsr166e note: During transition, this class
* uses nested functional interfaces with different names but the
* same forms as those expected for JDK8.
*
* This class is a member of the
*
* Java Collections Framework.
*
* @since 1.5
* @author Doug Lea
* @param This interface exports a subset of expected JDK8
* functionality.
*
* Sample usage: Here is one (of the several) ways to compute
* the sum of the values held in a map using the ForkJoin
* framework. As illustrated here, Spliterators are well suited to
* designs in which a task repeatedly splits off half its work
* into forked subtasks until small enough to process directly,
* and then joins these subtasks. Variants of this style can also
* be used in completion-based designs.
*
* More formally, if this map contains a mapping from a key
* {@code k} to a value {@code v} such that {@code key.equals(k)},
* then this method returns {@code v}; otherwise it returns
* {@code null}. (There can be at most one such mapping.)
*
* @throws NullPointerException if the specified key is null
*/
// Looks up the current mapping for key; absent keys yield null.
@SuppressWarnings("unchecked") public V get(Object key) {
    if (key != null)
        return (V) internalGet(key);
    throw new NullPointerException();
}
/**
 * Returns the value currently associated with {@code key}, or the
 * supplied {@code defaultValue} when this map has no mapping for it.
 *
 * @param key the key to look up
 * @param defaultValue the value to return if this map contains
 *        no mapping for the given key
 * @return the mapping for the key, if present; else the defaultValue
 * @throws NullPointerException if the specified key is null
 */
@SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) {
    if (key == null)
        throw new NullPointerException();
    Object found = internalGet(key);
    return (found == null) ? defaultValue : (V) found;
}
/**
 * Tests whether {@code key} currently has a mapping in this table.
 *
 * @param key possible key
 * @return {@code true} if and only if a mapping for the specified
 *         object exists, as determined by the {@code equals} method;
 *         {@code false} otherwise
 * @throws NullPointerException if the specified key is null
 */
public boolean containsKey(Object key) {
    if (key == null)
        throw new NullPointerException();
    return null != internalGet(key);
}
/**
* Returns {@code true} if this map maps one or more keys to the
* specified value. Note: This method may require a full traversal
* of the map, and is much slower than method {@code containsKey}.
*
* @param value value whose presence in this map is to be tested
* @return {@code true} if this map maps one or more keys to the
* specified value
* @throws NullPointerException if the specified value is null
*/
public boolean containsValue(Object value) {
if (value == null)
throw new NullPointerException();
Object v;
Traverser The value can be retrieved by calling the {@code get} method
* with a key that is equal to the original key.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return the previous value associated with {@code key}, or
* {@code null} if there was no mapping for {@code key}
* @throws NullPointerException if the specified key or value is null
*/
// Associates value with key, replacing any existing mapping, and
// returns the previous value (null if the key was absent).
// Neither argument may be null.
@SuppressWarnings("unchecked") public V put(K key, V value) {
    if (key == null)
        throw new NullPointerException();
    if (value == null)
        throw new NullPointerException();
    return (V) internalPut(key, value);
}
/**
 * Installs the mapping {@code key -> value} only when the key is not
 * already present.
 *
 * @return the previous value associated with the specified key,
 *         or {@code null} if there was no mapping for the key
 * @throws NullPointerException if the specified key or value is null
 */
@SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) {
    if (key == null)
        throw new NullPointerException();
    if (value == null)
        throw new NullPointerException();
    return (V) internalPutIfAbsent(key, value);
}
/**
* Copies all of the mappings from the specified map to this one.
* These mappings replace any mappings that this map had for any of the
* keys currently in the specified map.
*
* @param m mappings to be stored in this map
*/
public void putAll(Map extends K, ? extends V> m) {
internalPutAll(m);
}
/**
* If the specified key is not already associated with a value,
* computes its value using the given mappingFunction and enters
* it into the map unless null. This is equivalent to
* The view's {@code iterator} is a "weakly consistent" iterator
* that will never throw {@link ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
*/
public Set
*
*
*
*
*
*
* {@code ConcurrentHashMapV8
*/
public static interface Spliterator {@code
* if (map.containsKey(key))
* return map.get(key);
* value = mappingFunction.apply(key);
* if (value != null)
* map.put(key, value);
* return value;}
*
* except that the action is performed atomically. If the
* function returns {@code null} no mapping is recorded. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and no mapping is recorded. Some
* attempted update operations on this map by other threads may be
* blocked while computation is in progress, so the computation
* should be short and simple, and must not attempt to update any
* other mappings of this Map. The most appropriate usage is to
* construct a new object serving as an initial mapped value, or
* memoized result, as in:
*
* {@code
* map.computeIfAbsent(key, new Fun
*
* @param key key with which the specified value is to be associated
* @param mappingFunction the function to compute a value
* @return the current (existing or computed) value associated with
* the specified key, or null if the computed value is null
* @throws NullPointerException if the specified key or mappingFunction
* is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
* @throws RuntimeException or Error if the mappingFunction does so,
* in which case the mapping is left unestablished
*/
@SuppressWarnings("unchecked") public V computeIfAbsent
(K key, Fun super K, ? extends V> mappingFunction) {
if (key == null || mappingFunction == null)
throw new NullPointerException();
return (V)internalComputeIfAbsent(key, mappingFunction);
}
/**
* If the given key is present, computes a new mapping value given a key and
* its current mapped value. This is equivalent to
* {@code
* if (map.containsKey(key)) {
* value = remappingFunction.apply(key, map.get(key));
* if (value != null)
* map.put(key, value);
* else
* map.remove(key);
* }
* }
*
* except that the action is performed atomically. If the
* function returns {@code null}, the mapping is removed. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and the current mapping is left
* unchanged. Some attempted update operations on this map by
* other threads may be blocked while computation is in progress,
* so the computation should be short and simple, and must not
* attempt to update any other mappings of this Map. For example,
* to either create or append new messages to a value mapping:
*
* @param key key with which the specified value is to be associated
* @param remappingFunction the function to compute a value
* @return the new value associated with the specified key, or null if none
* @throws NullPointerException if the specified key or remappingFunction
* is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
* @throws RuntimeException or Error if the remappingFunction does so,
* in which case the mapping is unchanged
*/
@SuppressWarnings("unchecked") public V computeIfPresent
(K key, BiFun super K, ? super V, ? extends V> remappingFunction) {
if (key == null || remappingFunction == null)
throw new NullPointerException();
return (V)internalCompute(key, true, remappingFunction);
}
/**
* Computes a new mapping value given a key and
* its current mapped value (or {@code null} if there is no current
* mapping). This is equivalent to
* {@code
* value = remappingFunction.apply(key, map.get(key));
* if (value != null)
* map.put(key, value);
* else
* map.remove(key);
* }
*
* except that the action is performed atomically. If the
* function returns {@code null}, the mapping is removed. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and the current mapping is left
* unchanged. Some attempted update operations on this map by
* other threads may be blocked while computation is in progress,
* so the computation should be short and simple, and must not
* attempt to update any other mappings of this Map. For example,
* to either create or append new messages to a value mapping:
*
* {@code
* Map
*
* @param key key with which the specified value is to be associated
* @param remappingFunction the function to compute a value
* @return the new value associated with the specified key, or null if none
* @throws NullPointerException if the specified key or remappingFunction
* is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
* @throws RuntimeException or Error if the remappingFunction does so,
* in which case the mapping is unchanged
*/
@SuppressWarnings("unchecked") public V compute
(K key, BiFun super K, ? super V, ? extends V> remappingFunction) {
if (key == null || remappingFunction == null)
throw new NullPointerException();
return (V)internalCompute(key, false, remappingFunction);
}
/**
* If the specified key is not already associated
* with a value, associate it with the given value.
* Otherwise, replace the value with the results of
* the given remapping function. This is equivalent to:
* {@code
 * if (!map.containsKey(key))
 *   map.put(key, value);
 * else {
 *   newValue = remappingFunction.apply(map.get(key), value);
 *   if (newValue != null)
 *     map.put(key, newValue);
 *   else
 *     map.remove(key);
 * }
* }
* except that the action is performed atomically. If the
* function returns {@code null}, the mapping is removed. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and the current mapping is left
* unchanged. Some attempted update operations on this map by
* other threads may be blocked while computation is in progress,
* so the computation should be short and simple, and must not
* attempt to update any other mappings of this Map.
*/
@SuppressWarnings("unchecked") public V merge
(K key, V value, BiFun super V, ? super V, ? extends V> remappingFunction) {
if (key == null || value == null || remappingFunction == null)
throw new NullPointerException();
return (V)internalMerge(key, value, remappingFunction);
}
/**
 * Removes the key (and its corresponding value) from this map; does
 * nothing when the key is absent.
 *
 * @param key the key that needs to be removed
 * @return the previous value associated with {@code key}, or
 *         {@code null} if there was no mapping for {@code key}
 * @throws NullPointerException if the specified key is null
 */
@SuppressWarnings("unchecked") public V remove(Object key) {
    if (key != null)
        return (V) internalReplace(key, null, null);
    throw new NullPointerException();
}
/**
 * Removes the entry for {@code key} only if it is currently mapped to
 * {@code value}.
 *
 * @throws NullPointerException if the specified key is null
 */
public boolean remove(Object key, Object value) {
    if (key == null)
        throw new NullPointerException();
    // A null expected value can never match, so nothing is removed.
    return value != null && internalReplace(key, null, value) != null;
}
/**
 * Replaces the entry for {@code key} only if it is currently mapped to
 * {@code oldValue}.
 *
 * @throws NullPointerException if any of the arguments are null
 */
public boolean replace(K key, V oldValue, V newValue) {
    if (key == null || oldValue == null || newValue == null)
        throw new NullPointerException();
    Object previous = internalReplace(key, newValue, oldValue);
    return previous != null;
}
/**
 * Replaces the entry for {@code key} only if it is currently mapped to
 * some value.
 *
 * @return the previous value associated with the specified key,
 *         or {@code null} if there was no mapping for the key
 * @throws NullPointerException if the specified key or value is null
 */
@SuppressWarnings("unchecked") public V replace(K key, V value) {
    if (key == null)
        throw new NullPointerException();
    if (value == null)
        throw new NullPointerException();
    return (V) internalReplace(key, value, null);
}
/**
 * Removes all of the mappings from this map by delegating to the
 * internal table-reset routine {@code internalClear()}.
 */
public void clear() {
internalClear();
}
/**
* Returns a {@link Set} view of the keys contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa.
*
* @return the set view
*/
public KeySetView
*
*
*
*
*
*
* {@code ConcurrentHashMapV8
*/
public static interface Spliterator {@code
* if (map.containsKey(key))
* return map.get(key);
* value = mappingFunction.apply(key);
* if (value != null)
* map.put(key, value);
* return value;}
*
* except that the action is performed atomically. If the
* function returns {@code null} no mapping is recorded. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and no mapping is recorded. Some
* attempted update operations on this map by other threads may be
* blocked while computation is in progress, so the computation
* should be short and simple, and must not attempt to update any
* other mappings of this Map. The most appropriate usage is to
* construct a new object serving as an initial mapped value, or
* memoized result, as in:
*
* {@code
* map.computeIfAbsent(key, new Fun
*
* @param key key with which the specified value is to be associated
* @param mappingFunction the function to compute a value
* @return the current (existing or computed) value associated with
* the specified key, or null if the computed value is null
* @throws NullPointerException if the specified key or mappingFunction
* is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
* @throws RuntimeException or Error if the mappingFunction does so,
* in which case the mapping is left unestablished
*/
@SuppressWarnings("unchecked") public V computeIfAbsent
(K key, Fun super K, ? extends V> mappingFunction) {
if (key == null || mappingFunction == null)
throw new NullPointerException();
return (V)internalComputeIfAbsent(key, mappingFunction);
}
/**
* If the given key is present, computes a new mapping value given a key and
* its current mapped value. This is equivalent to
* {@code
* if (map.containsKey(key)) {
* value = remappingFunction.apply(key, map.get(key));
* if (value != null)
* map.put(key, value);
* else
* map.remove(key);
* }
* }
*
* except that the action is performed atomically. If the
* function returns {@code null}, the mapping is removed. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and the current mapping is left
* unchanged. Some attempted update operations on this map by
* other threads may be blocked while computation is in progress,
* so the computation should be short and simple, and must not
* attempt to update any other mappings of this Map. For example,
* to either create or append new messages to a value mapping:
*
* @param key key with which the specified value is to be associated
* @param remappingFunction the function to compute a value
* @return the new value associated with the specified key, or null if none
* @throws NullPointerException if the specified key or remappingFunction
* is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
* @throws RuntimeException or Error if the remappingFunction does so,
* in which case the mapping is unchanged
*/
@SuppressWarnings("unchecked") public V computeIfPresent
(K key, BiFun super K, ? super V, ? extends V> remappingFunction) {
if (key == null || remappingFunction == null)
throw new NullPointerException();
return (V)internalCompute(key, true, remappingFunction);
}
/**
* Computes a new mapping value given a key and
* its current mapped value (or {@code null} if there is no current
* mapping). This is equivalent to
* {@code
* value = remappingFunction.apply(key, map.get(key));
* if (value != null)
* map.put(key, value);
* else
* map.remove(key);
* }
*
* except that the action is performed atomically. If the
* function returns {@code null}, the mapping is removed. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and the current mapping is left
* unchanged. Some attempted update operations on this map by
* other threads may be blocked while computation is in progress,
* so the computation should be short and simple, and must not
* attempt to update any other mappings of this Map. For example,
* to either create or append new messages to a value mapping:
*
* {@code
* Map
*
* @param key key with which the specified value is to be associated
* @param remappingFunction the function to compute a value
* @return the new value associated with the specified key, or null if none
* @throws NullPointerException if the specified key or remappingFunction
* is null
* @throws IllegalStateException if the computation detectably
* attempts a recursive update to this map that would
* otherwise never complete
* @throws RuntimeException or Error if the remappingFunction does so,
* in which case the mapping is unchanged
*/
@SuppressWarnings("unchecked") public V compute
(K key, BiFun super K, ? super V, ? extends V> remappingFunction) {
if (key == null || remappingFunction == null)
throw new NullPointerException();
return (V)internalCompute(key, false, remappingFunction);
}
/**
* If the specified key is not already associated
* with a value, associate it with the given value.
* Otherwise, replace the value with the results of
* the given remapping function. This is equivalent to:
* {@code
 * if (!map.containsKey(key))
 *   map.put(key, value);
 * else {
 *   newValue = remappingFunction.apply(map.get(key), value);
 *   if (newValue != null)
 *     map.put(key, newValue);
 *   else
 *     map.remove(key);
 * }
* }
* except that the action is performed atomically. If the
* function returns {@code null}, the mapping is removed. If the
* function itself throws an (unchecked) exception, the exception
* is rethrown to its caller, and the current mapping is left
* unchanged. Some attempted update operations on this map by
* other threads may be blocked while computation is in progress,
* so the computation should be short and simple, and must not
* attempt to update any other mappings of this Map.
*/
@SuppressWarnings("unchecked") public V merge
(K key, V value, BiFun super V, ? super V, ? extends V> remappingFunction) {
if (key == null || value == null || remappingFunction == null)
throw new NullPointerException();
return (V)internalMerge(key, value, remappingFunction);
}
/**
 * Removes the key (and its corresponding value) from this map; does
 * nothing when the key is absent.
 *
 * @param key the key that needs to be removed
 * @return the previous value associated with {@code key}, or
 *         {@code null} if there was no mapping for {@code key}
 * @throws NullPointerException if the specified key is null
 */
@SuppressWarnings("unchecked") public V remove(Object key) {
    if (key != null)
        return (V) internalReplace(key, null, null);
    throw new NullPointerException();
}
/**
 * Removes the entry for {@code key} only if it is currently mapped to
 * {@code value}.
 *
 * @throws NullPointerException if the specified key is null
 */
public boolean remove(Object key, Object value) {
    if (key == null)
        throw new NullPointerException();
    // A null expected value can never match, so nothing is removed.
    return value != null && internalReplace(key, null, value) != null;
}
/**
 * Replaces the entry for {@code key} only if it is currently mapped to
 * {@code oldValue}.
 *
 * @throws NullPointerException if any of the arguments are null
 */
public boolean replace(K key, V oldValue, V newValue) {
    if (key == null || oldValue == null || newValue == null)
        throw new NullPointerException();
    Object previous = internalReplace(key, newValue, oldValue);
    return previous != null;
}
/**
 * Replaces the entry for {@code key} only if it is currently mapped to
 * some value.
 *
 * @return the previous value associated with the specified key,
 *         or {@code null} if there was no mapping for the key
 * @throws NullPointerException if the specified key or value is null
 */
@SuppressWarnings("unchecked") public V replace(K key, V value) {
    if (key == null)
        throw new NullPointerException();
    if (value == null)
        throw new NullPointerException();
    return (V) internalReplace(key, value, null);
}
/**
 * Removes all of the mappings from this map by delegating to the
 * internal table-reset routine {@code internalClear()}.
 */
public void clear() {
internalClear();
}
/**
* Returns a {@link Set} view of the keys contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa.
*
* @return the set view
*/
public KeySetView