thread_safe-0.3.5/0000755000004100000410000000000012530443654014022 5ustar www-datawww-datathread_safe-0.3.5/yard-template/0000755000004100000410000000000012530443654016572 5ustar www-datawww-datathread_safe-0.3.5/yard-template/default/0000755000004100000410000000000012530443654020216 5ustar www-datawww-datathread_safe-0.3.5/yard-template/default/layout/0000755000004100000410000000000012530443654021533 5ustar www-datawww-datathread_safe-0.3.5/yard-template/default/layout/html/0000755000004100000410000000000012530443654022477 5ustar www-datawww-datathread_safe-0.3.5/yard-template/default/layout/html/footer.erb0000644000004100000410000000120012530443654024460 0ustar www-datawww-data thread_safe-0.3.5/yard-template/default/fulldoc/0000755000004100000410000000000012530443654021646 5ustar www-datawww-datathread_safe-0.3.5/yard-template/default/fulldoc/html/0000755000004100000410000000000012530443654022612 5ustar www-datawww-datathread_safe-0.3.5/yard-template/default/fulldoc/html/css/0000755000004100000410000000000012530443654023402 5ustar www-datawww-datathread_safe-0.3.5/yard-template/default/fulldoc/html/css/common.css0000644000004100000410000000414612530443654025411 0ustar www-datawww-data/* Override this file with custom rules */ body { line-height: 18px; } .docstring code, .docstring .object_link a, #filecontents code { padding: 0px 3px 1px 3px; border: 1px solid #eef; background: #f5f5ff; } #filecontents pre code, .docstring pre code { border: none; background: none; padding: 0; } #filecontents pre.code, .docstring pre.code, .tags pre.example, .docstring code, .docstring .object_link a, #filecontents code { -moz-border-radius: 2px; -webkit-border-radius: 2px; } /* syntax highlighting */ .source_code { display: none; padding: 3px 8px; border-left: 8px solid #ddd; margin-top: 5px; } #filecontents pre.code, .docstring pre.code, .source_code pre { font-family: monospace; } #filecontents pre.code, .docstring pre.code { display: block; } .source_code .lines { padding-right: 12px; color: #555; text-align: right; } #filecontents pre.code, .docstring pre.code, .tags pre.example { padding: 5px 12px; margin-top: 4px; border: 1px solid #eef; background: #f5f5ff; } pre.code { color: #000; } pre.code .info.file { color: #555; } pre.code .val { color: #036A07; } pre.code .tstring_content, pre.code .heredoc_beg, pre.code .heredoc_end, pre.code .qwords_beg, pre.code .qwords_end, pre.code .tstring, pre.code .dstring { color: #036A07; } pre.code .fid, pre.code .rubyid_new, pre.code .rubyid_to_s, pre.code .rubyid_to_sym, pre.code .rubyid_to_f, pre.code .rubyid_to_i, pre.code .rubyid_each { color: inherit; } pre.code .comment { color: #777; font-style: italic; } pre.code .const, pre.code .constant { color: inherit; font-weight: bold; font-style: italic; } pre.code .label, pre.code .symbol { color: #C5060B; } pre.code .kw, pre.code .rubyid_require, pre.code .rubyid_extend, pre.code .rubyid_include, pre.code .int { color: #0000FF; } pre.code .ivar { color: #660E7A; } pre.code .gvar, pre.code .rubyid_backref, pre.code .rubyid_nth_ref { color: #6D79DE; } pre.code .regexp, .dregexp { color: #036A07; } pre.code a { border-bottom: 1px dotted #bbf; } thread_safe-0.3.5/Rakefile0000644000004100000410000000307612530443654015475 0ustar www-datawww-datarequire "bundler/gem_tasks" require "rake/testtask" ## safely load all the rake tasks in the `tasks` directory def safe_load(file) begin load file rescue LoadError => ex puts "Error loading rake tasks from '#{file}' but will continue..." 
puts ex.message end end Dir.glob('tasks/**/*.rake').each do |rakefile| safe_load rakefile end task :default => :test if defined?(JRUBY_VERSION) require "ant" directory "pkg/classes" directory 'pkg/tests' desc "Clean up build artifacts" task :clean do rm_rf "pkg/classes" rm_rf "pkg/tests" rm_rf "lib/thread_safe/jruby_cache_backend.jar" end desc "Compile the extension" task :compile => "pkg/classes" do |t| ant.javac :srcdir => "ext", :destdir => t.prerequisites.first, :source => "1.5", :target => "1.5", :debug => true, :classpath => "${java.class.path}:${sun.boot.class.path}" end desc "Build the jar" task :jar => :compile do ant.jar :basedir => "pkg/classes", :destfile => "lib/thread_safe/jruby_cache_backend.jar", :includes => "**/*.class" end desc "Build test jar" task 'test-jar' => 'pkg/tests' do |t| ant.javac :srcdir => 'test/src', :destdir => t.prerequisites.first, :source => "1.5", :target => "1.5", :debug => true ant.jar :basedir => 'pkg/tests', :destfile => 'test/package.jar', :includes => '**/*.class' end task :package => [ :jar, 'test-jar' ] else # No need to package anything for non-jruby rubies task :package end Rake::TestTask.new :test => :package do |t| t.libs << "lib" t.test_files = FileList["test/**/*.rb"] end thread_safe-0.3.5/Gemfile0000644000004100000410000000101612530443654015313 0ustar www-datawww-datasource 'https://rubygems.org' gemspec group :development, :test do gem 'minitest', '~> 5.5.1' gem 'minitest-reporters', '~> 1.0.11' gem 'simplecov', '~> 0.9.2', :require => false gem 'coveralls', '~> 0.7.11', :require => false end group :documentation do gem 'countloc', '~> 0.4.0', :platforms => :mri, :require => false gem 'yard', '~> 0.8.7.6', :require => false gem 'inch', '~> 0.5.10', :platforms => :mri, :require => false gem 'redcarpet', '~> 3.2.2', platforms: :mri # understands github markdown end thread_safe-0.3.5/examples/0000755000004100000410000000000012530443654015640 5ustar www-datawww-datathread_safe-0.3.5/examples/bench_cache.rb0000755000004100000410000000113112530443654020366 0ustar www-datawww-data#!/usr/bin/env ruby -wKU require "benchmark" require "thread_safe" hash = {} cache = ThreadSafe::Cache.new ENTRIES = 10_000 ENTRIES.times do |i| hash[i] = i cache[i] = i end TESTS = 40_000_000 Benchmark.bmbm do |results| key = rand(10_000) results.report('Hash#[]') do TESTS.times { hash[key] } end results.report('Cache#[]') do TESTS.times { cache[key] } end results.report('Hash#each_pair') do (TESTS / ENTRIES).times { hash.each_pair {|k,v| v} } end results.report('Cache#each_pair') do (TESTS / ENTRIES).times { cache.each_pair {|k,v| v} } end end thread_safe-0.3.5/thread_safe.gemspec0000644000004100000410000000242712530443654017641 0ustar www-datawww-data# -*- encoding: utf-8 -*- $:.push File.expand_path('../lib', __FILE__) unless $:.include?('lib') require 'thread_safe/version' Gem::Specification.new do |gem| gem.authors = ["Charles Oliver Nutter", "thedarkone"] gem.email = ["headius@headius.com", "thedarkone2@gmail.com"] gem.description = %q{Thread-safe collections and utilities for Ruby} gem.summary = %q{A collection of data structures and utilities to make thread-safe programming in Ruby easier} gem.homepage = "https://github.com/ruby-concurrency/thread_safe" gem.files = `git ls-files`.split($\) gem.files += ['lib/thread_safe/jruby_cache_backend.jar'] if defined?(JRUBY_VERSION) gem.files -= ['.gitignore'] # see https://github.com/headius/thread_safe/issues/40#issuecomment-42315441 gem.platform = 'java' if defined?(JRUBY_VERSION) gem.executables = 
gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) } gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) gem.name = "thread_safe" gem.require_paths = ["lib"] gem.version = ThreadSafe::VERSION gem.license = "Apache-2.0" gem.add_development_dependency 'atomic', '= 1.1.16' gem.add_development_dependency 'rake' gem.add_development_dependency 'minitest', '>= 4' end thread_safe-0.3.5/.travis.yml0000644000004100000410000000132612530443654016135 0ustar www-datawww-datalanguage: ruby rvm: - 2.2.0 - 2.1.5 - 2.1.4 - 2.0.0 - 1.9.3 - ruby-head - jruby-1.7.18 - jruby-head - rbx-2 jdk: # for JRuby only - openjdk7 - oraclejdk8 matrix: exclude: - rvm: 2.2.0 jdk: openjdk7 jdk: oraclejdk8 - rvm: 2.1.5 jdk: openjdk7 jdk: oraclejdk8 - rvm: 2.1.4 jdk: openjdk7 jdk: oraclejdk8 - rvm: 2.0.0 jdk: openjdk7 jdk: oraclejdk8 - rvm: 1.9.3 jdk: openjdk7 jdk: oraclejdk8 - rvm: ruby-head jdk: openjdk7 jdk: oraclejdk8 - rvm: rbx-2 jdk: openjdk7 jdk: oraclejdk8 allow_failures: - rvm: ruby-head - rvm: jruby-head - rvm: 1.9.3 script: "rake TESTOPTS='--seed=1'" thread_safe-0.3.5/lib/0000755000004100000410000000000012530443654014570 5ustar www-datawww-datathread_safe-0.3.5/lib/thread_safe.rb0000644000004100000410000000361012530443654017362 0ustar www-datawww-datarequire 'thread_safe/version' require 'thread_safe/synchronized_delegator' module ThreadSafe autoload :Cache, 'thread_safe/cache' autoload :Util, 'thread_safe/util' # Various classes within allows for +nil+ values to be stored, so a special +NULL+ token is required to indicate the "nil-ness". NULL = Object.new if defined?(JRUBY_VERSION) require 'jruby/synchronized' # A thread-safe subclass of Array. This version locks # against the object itself for every method call, # ensuring only one thread can be reading or writing # at a time. This includes iteration methods like # #each. class Array < ::Array include JRuby::Synchronized end # A thread-safe subclass of Hash. This version locks # against the object itself for every method call, # ensuring only one thread can be reading or writing # at a time. This includes iteration methods like # #each. class Hash < ::Hash include JRuby::Synchronized end elsif !defined?(RUBY_ENGINE) || RUBY_ENGINE == 'ruby' # Because MRI never runs code in parallel, the existing # non-thread-safe structures should usually work fine. 
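    # For instance (illustrative only -- the aliasing below means these
    # constants are the very same class objects, with no extra locking
    # on this engine):
    #
    #   ThreadSafe::Array.equal?(::Array)  # => true on MRI
    #   ThreadSafe::Hash.equal?(::Hash)    # => true on MRI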
Array = ::Array Hash = ::Hash elsif defined?(RUBY_ENGINE) && RUBY_ENGINE == 'rbx' require 'monitor' class Hash < ::Hash; end class Array < ::Array; end [Hash, Array].each do |klass| klass.class_eval do private def _mon_initialize @_monitor = Monitor.new unless @_monitor # avoid double initialisation end def self.allocate obj = super obj.send(:_mon_initialize) obj end end klass.superclass.instance_methods(false).each do |method| klass.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{method}(*args) @_monitor.synchronize { super } end RUBY_EVAL end end end end thread_safe-0.3.5/lib/thread_safe/0000755000004100000410000000000012530443654017035 5ustar www-datawww-datathread_safe-0.3.5/lib/thread_safe/cache.rb0000644000004100000410000001100612530443654020423 0ustar www-datawww-datarequire 'thread' module ThreadSafe autoload :JRubyCacheBackend, 'thread_safe/jruby_cache_backend' autoload :MriCacheBackend, 'thread_safe/mri_cache_backend' autoload :NonConcurrentCacheBackend, 'thread_safe/non_concurrent_cache_backend' autoload :AtomicReferenceCacheBackend, 'thread_safe/atomic_reference_cache_backend' autoload :SynchronizedCacheBackend, 'thread_safe/synchronized_cache_backend' ConcurrentCacheBackend = if defined?(RUBY_ENGINE) case RUBY_ENGINE when 'jruby'; JRubyCacheBackend when 'ruby'; MriCacheBackend when 'rbx'; AtomicReferenceCacheBackend else warn 'ThreadSafe: unsupported Ruby engine, using a fully synchronized ThreadSafe::Cache implementation' if $VERBOSE SynchronizedCacheBackend end else MriCacheBackend end class Cache < ConcurrentCacheBackend KEY_ERROR = defined?(KeyError) ? KeyError : IndexError # there is no KeyError in 1.8 mode def initialize(options = nil, &block) if options.kind_of?(::Hash) validate_options_hash!(options) else options = nil end super(options) @default_proc = block end def [](key) if value = super # non-falsy value is an existing mapping, return it right away value # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value # would be returned) # note: nil == value check is not technically necessary elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) @default_proc.call(self, key) else value end end alias_method :get, :[] alias_method :put, :[]= def fetch(key, default_value = NULL) if NULL != (value = get_or_default(key, NULL)) value elsif block_given? yield key elsif NULL != default_value default_value else raise_fetch_no_key end end def fetch_or_store(key, default_value = NULL) fetch(key) do put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) end end def put_if_absent(key, value) computed = false result = compute_if_absent(key) do computed = true value end computed ? nil : result end unless method_defined?(:put_if_absent) def value?(value) each_value do |v| return true if value.equal?(v) end false end unless method_defined?(:value?) 
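    # A hedged usage sketch of the composed operations defined around here
    # (illustrative only; the return values assume an initially empty cache):
    #
    #   cache = ThreadSafe::Cache.new
    #   cache.put_if_absent(:a, 1)      # => nil (no previous mapping)
    #   cache.put_if_absent(:a, 2)      # => 1   (existing value wins)
    #   cache.fetch_or_store(:b) { 2 }  # => 2   (computed and stored)
    #   cache.value?(1)                 # => true (identity comparison)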
def keys arr = [] each_pair {|k, v| arr << k} arr end unless method_defined?(:keys) def values arr = [] each_pair {|k, v| arr << v} arr end unless method_defined?(:values) def each_key each_pair {|k, v| yield k} end unless method_defined?(:each_key) def each_value each_pair {|k, v| yield v} end unless method_defined?(:each_value) def key(value) each_pair {|k, v| return k if v == value} nil end unless method_defined?(:key) alias_method :index, :key if RUBY_VERSION < '1.9' def empty? each_pair {|k, v| return false} true end unless method_defined?(:empty?) def size count = 0 each_pair {|k, v| count += 1} count end unless method_defined?(:size) def marshal_dump raise TypeError, "can't dump hash with default proc" if @default_proc h = {} each_pair {|k, v| h[k] = v} h end def marshal_load(hash) initialize populate_from(hash) end undef :freeze private def raise_fetch_no_key raise KEY_ERROR, 'key not found' end def initialize_copy(other) super populate_from(other) end def populate_from(hash) hash.each_pair {|k, v| self[k] = v} self end def validate_options_hash!(options) if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Fixnum) || initial_capacity < 0) raise ArgumentError, ":initial_capacity must be a positive Fixnum" end if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) raise ArgumentError, ":load_factor must be a number between 0 and 1" end end end end thread_safe-0.3.5/lib/thread_safe/synchronized_cache_backend.rb0000644000004100000410000000251012530443654024671 0ustar www-datawww-datamodule ThreadSafe class SynchronizedCacheBackend < NonConcurrentCacheBackend require 'mutex_m' include Mutex_m # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are # not allowed to call each other. def [](key) synchronize { super } end def []=(key, value) synchronize { super } end def compute_if_absent(key) synchronize { super } end def compute_if_present(key) synchronize { super } end def compute(key) synchronize { super } end def merge_pair(key, value) synchronize { super } end def replace_pair(key, old_value, new_value) synchronize { super } end def replace_if_exists(key, new_value) synchronize { super } end def get_and_set(key, value) synchronize { super } end def key?(key) synchronize { super } end def value?(value) synchronize { super } end def delete(key) synchronize { super } end def delete_pair(key, value) synchronize { super } end def clear synchronize { super } end def size synchronize { super } end def get_or_default(key, default_value) synchronize { super } end private def dupped_backend synchronize { super } end end end thread_safe-0.3.5/lib/thread_safe/non_concurrent_cache_backend.rb0000644000004100000410000000524612530443654025217 0ustar www-datawww-datamodule ThreadSafe class NonConcurrentCacheBackend # WARNING: all public methods of the class must operate on the @backend # directly without calling each other. This is important because of the # SynchronizedCacheBackend which uses a non-reentrant mutex for perfomance # reasons. 
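    # The +NULL+ token pattern used throughout this class:
    # +@backend.fetch(key, NULL)+ distinguishes a stored +nil+ from a
    # missing key, which a plain +@backend[key]+ read cannot. Illustrative:
    #
    #   backend = { :a => nil }
    #   backend[:a]              # => nil -- present or absent?
    #   backend.fetch(:a, NULL)  # => nil  (mapping exists)
    #   backend.fetch(:b, NULL)  # => NULL (no mapping)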
def initialize(options = nil) @backend = {} end def [](key) @backend[key] end def []=(key, value) @backend[key] = value end def compute_if_absent(key) if NULL != (stored_value = @backend.fetch(key, NULL)) stored_value else @backend[key] = yield end end def replace_pair(key, old_value, new_value) if pair?(key, old_value) @backend[key] = new_value true else false end end def replace_if_exists(key, new_value) if NULL != (stored_value = @backend.fetch(key, NULL)) @backend[key] = new_value stored_value end end def compute_if_present(key) if NULL != (stored_value = @backend.fetch(key, NULL)) store_computed_value(key, yield(stored_value)) end end def compute(key) store_computed_value(key, yield(@backend[key])) end def merge_pair(key, value) if NULL == (stored_value = @backend.fetch(key, NULL)) @backend[key] = value else store_computed_value(key, yield(stored_value)) end end def get_and_set(key, value) stored_value = @backend[key] @backend[key] = value stored_value end def key?(key) @backend.key?(key) end def value?(value) @backend.value?(value) end def delete(key) @backend.delete(key) end def delete_pair(key, value) if pair?(key, value) @backend.delete(key) true else false end end def clear @backend.clear self end def each_pair dupped_backend.each_pair do |k, v| yield k, v end self end def size @backend.size end def get_or_default(key, default_value) @backend.fetch(key, default_value) end alias_method :_get, :[] alias_method :_set, :[]= private :_get, :_set private def initialize_copy(other) super @backend = {} self end def dupped_backend @backend.dup end def pair?(key, expected_value) NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) end def store_computed_value(key, new_value) if new_value.nil? @backend.delete(key) nil else @backend[key] = new_value end end end end thread_safe-0.3.5/lib/thread_safe/mri_cache_backend.rb0000644000004100000410000000404212530443654022743 0ustar www-datawww-datamodule ThreadSafe class MriCacheBackend < NonConcurrentCacheBackend # We can get away with a single global write lock (instead of a per-instance # one) because of the GVL/green threads. # # The previous implementation used `Thread.critical` on 1.8 MRI to implement # the 4 composed atomic operations (`put_if_absent`, `replace_pair`, # `replace_if_exists`, `delete_pair`) this however doesn't work for # `compute_if_absent` because on 1.8 the Mutex class is itself implemented # via `Thread.critical` and a call to `Mutex#lock` does not restore the # previous `Thread.critical` value (thus any synchronisation clears the # `Thread.critical` flag and we loose control). This poses a problem as the # provided block might use synchronisation on its own. # # NOTE: a neat idea of writing a c-ext to manually perform atomic # put_if_absent, while relying on Ruby not releasing a GVL while calling a # c-ext will not work because of the potentially Ruby implemented `#hash` # and `#eql?` key methods. 
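    # The net effect, roughly sketched (not the actual call graph): reads
    # hit the underlying Hash directly and stay lock-free under the GVL,
    # while every mutation funnels through the single process-wide lock:
    #
    #   cache[:key]                            # lock-free read
    #   cache[:key] = value                    # WRITE_LOCK.synchronize { ... }
    #   cache.compute_if_absent(:key) { ... }  # fast non-blocking read first,
    #                                          # takes the lock only on a miss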
WRITE_LOCK = Mutex.new def []=(key, value) WRITE_LOCK.synchronize { super } end def compute_if_absent(key) if stored_value = _get(key) # fast non-blocking path for the most likely case stored_value else WRITE_LOCK.synchronize { super } end end def compute_if_present(key) WRITE_LOCK.synchronize { super } end def compute(key) WRITE_LOCK.synchronize { super } end def merge_pair(key, value) WRITE_LOCK.synchronize { super } end def replace_pair(key, old_value, new_value) WRITE_LOCK.synchronize { super } end def replace_if_exists(key, new_value) WRITE_LOCK.synchronize { super } end def get_and_set(key, value) WRITE_LOCK.synchronize { super } end def delete(key) WRITE_LOCK.synchronize { super } end def delete_pair(key, value) WRITE_LOCK.synchronize { super } end def clear WRITE_LOCK.synchronize { super } end end end thread_safe-0.3.5/lib/thread_safe/version.rb0000644000004100000410000000075412530443654021055 0ustar www-datawww-datamodule ThreadSafe VERSION = "0.3.5" end # NOTE: <= 0.2.0 used Threadsafe::VERSION # @private module Threadsafe # @private def self.const_missing(name) name = name.to_sym if ThreadSafe.const_defined?(name) warn "[DEPRECATION] `Threadsafe::#{name}' is deprecated, use `ThreadSafe::#{name}' instead." ThreadSafe.const_get(name) else warn "[DEPRECATION] the `Threadsafe' module is deprecated, please use `ThreadSafe` instead." super end end end thread_safe-0.3.5/lib/thread_safe/util/0000755000004100000410000000000012530443654020012 5ustar www-datawww-datathread_safe-0.3.5/lib/thread_safe/util/power_of_two_tuple.rb0000644000004100000410000000114212530443654024257 0ustar www-datawww-datamodule ThreadSafe module Util class PowerOfTwoTuple < VolatileTuple def initialize(size) raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 super(size) end def hash_to_index(hash) (size - 1) & hash end def volatile_get_by_hash(hash) volatile_get(hash_to_index(hash)) end def volatile_set_by_hash(hash, value) volatile_set(hash_to_index(hash), value) end def next_in_size_table self.class.new(size << 1) end end end end thread_safe-0.3.5/lib/thread_safe/util/striped64.rb0000644000004100000410000002151512530443654022167 0ustar www-datawww-datamodule ThreadSafe module Util # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 # available in public domain. # # Original source code available here: # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 # # Class holding common representation and mechanics for classes supporting # dynamic striping on 64bit values. # # This class maintains a lazily-initialized table of atomically updated # variables, plus an extra +base+ field. The table size is a power of two. # Indexing uses masked per-thread hash codes. Nearly all methods on this # class are private, accessed directly by subclasses. # # Table entries are of class +Cell+; a variant of AtomicLong padded to # reduce cache contention on most processors. Padding is overkill for most # Atomics because they are usually irregularly scattered in memory and thus # don't interfere much with each other. But Atomic objects residing in # arrays will tend to be placed adjacent to each other, and so will most # often share cache lines (with a huge negative performance impact) without # this precaution. # # In part because +Cell+s are relatively large, we avoid creating them until # they are needed. When there is no contention, all updates are made to the # +base+ field. 
Upon first contention (a failed CAS on +base+ update), the # table is initialized to size 2. The table size is doubled upon further # contention until reaching the nearest power of two greater than or equal # to the number of CPUS. Table slots remain empty (+nil+) until they are # needed. # # A single spinlock (+busy+) is used for initializing and resizing the # table, as well as populating slots with new +Cell+s. There is no need for # a blocking lock: When the lock is not available, threads try other slots # (or the base). During these retries, there is increased contention and # reduced locality, which is still better than alternatives. # # Per-thread hash codes are initialized to random values. Contention and/or # table collisions are indicated by failed CASes when performing an update # operation (see method +retry_update+). Upon a collision, if the table size # is less than the capacity, it is doubled in size unless some other thread # holds the lock. If a hashed slot is empty, and lock is available, a new # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries # proceed by "double hashing", using a secondary hash (XorShift) to try to # find a free slot. # # The table size is capped because, when there are more threads than CPUs, # supposing that each thread were bound to a CPU, there would exist a # perfect hash function mapping threads to slots that eliminates collisions. # When we reach capacity, we search for this mapping by randomly varying the # hash codes of colliding threads. Because search is random, and collisions # only become known via CAS failures, convergence can be slow, and because # threads are typically not bound to CPUS forever, may not occur at all. # However, despite these limitations, observed contention rates are # typically low in these cases. # # It is possible for a +Cell+ to become unused when threads that once hashed # to it terminate, as well as in the case where doubling the table causes no # thread to hash to it under expanded mask. We do not try to detect or # remove such cells, under the assumption that for long-running instances, # observed contention levels will recur, so the cells will eventually be # needed again; and for short-lived ones, it does not matter. class Striped64 # Padded variant of AtomicLong supporting only raw accesses plus CAS. # The +value+ field is placed between pads, hoping that the JVM doesn't # reorder them. # # Optimisation note: It would be possible to use a release-only # form of CAS here, if it were provided. class Cell < AtomicReference # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot attr_reader *(Array.new(12).map {|i| :"padding_#{i}"}) alias_method :cas, :compare_and_set def cas_computed cas(current_value = value, yield(current_value)) end end extend Volatile attr_volatile :cells, # Table of cells. When non-null, size is a power of 2. :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. alias_method :busy?, :busy def initialize super() self.busy = false self.base = 0 end # Handles cases of updates involving initialization, resizing, # creating new Cells, and/or contention. See above for # explanation. This method suffers the usual non-modularity # problems of optimistic retry code, relying on rechecked sets of # reads. 
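      #
      # As a rough illustration of where this scheme converges: on a
      # 4-CPU machine a fully contended counter settles into at most 4
      # +Cell+s plus +base+ (the table doubles from 2 until it reaches
      # the nearest power of two >= CPU_COUNT), after which colliding
      # threads rehash rather than grow the table.
      #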
# # Arguments: # [+x+] # the value # [+hash_code+] # hash code used # [+x+] # false if CAS failed before call def retry_update(x, hash_code, was_uncontended) # :yields: current_value hash = hash_code collided = false # True if last slot nonempty while true if current_cells = cells if !(cell = current_cells.volatile_get_by_hash(hash)) if busy? collided = false else # Try to attach new Cell if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell break else redo # Slot is now non-empty end end elsif !was_uncontended # CAS already known to fail was_uncontended = true # Continue after rehash elsif cell.cas_computed {|current_value| yield current_value} break elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale collided = false elsif collided && expand_table_unless_stale(current_cells) collided = false redo # Retry with expanded table else collided = true end hash = XorShiftRandom.xorshift(hash) elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} break end end self.hash_code = hash end private # Static per-thread hash code key. Shared across all instances to # reduce Thread locals pollution and because adjustments due to # collisions in one table are likely to be appropriate for # others. THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym # A thread-local hash code accessor. The code is initially # random, but may be set to a different value upon collisions. def hash_code Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get end def hash_code=(hash) Thread.current[THREAD_LOCAL_KEY] = hash end # Sets base and all +cells+ to the given value. def internal_reset(initial_value) current_cells = cells self.base = initial_value if current_cells current_cells.each do |cell| cell.value = initial_value if cell end end end def cas_base_computed cas_base(current_base = base, yield(current_base)) end def free? !busy? end def try_initialize_cells(x, hash) if free? && !cells try_in_busy do unless cells # Recheck under lock new_cells = PowerOfTwoTuple.new(2) new_cells.volatile_set_by_hash(hash, Cell.new(x)) self.cells = new_cells end end end end def expand_table_unless_stale(current_cells) try_in_busy do if current_cells == cells # Recheck under lock new_cells = current_cells.next_in_size_table current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} self.cells = new_cells end end end def try_to_install_new_cell(new_cell, hash) try_in_busy do # Recheck under lock if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) current_cells.volatile_set(i, new_cell) end end end def try_in_busy if cas_busy(false, true) begin yield ensure self.busy = false end end end end end end thread_safe-0.3.5/lib/thread_safe/util/xor_shift_random.rb0000644000004100000410000000263112530443654023706 0ustar www-datawww-datamodule ThreadSafe module Util # A xorshift random number (positive +Fixnum+s) generator, provides # reasonably cheap way to generate thread local random numbers without # contending for the global +Kernel.rand+. # # Usage: # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed # while true # if (x = XorShiftRandom.xorshift).odd? # thread-localy generate a next random number # do_something_at_random # end # end module XorShiftRandom extend self MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. 
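      # (0 is the fixed point of the transform: shifting zero yields zero
      # and 0 XOR 0 == 0, so xorshift(0) would return 0 forever -- hence
      # the + 1 below.)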
def get Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted end # xorshift based on: http://www.jstatsoft.org/v08/i14/paper if 0.size == 4 # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows def xorshift(x) x ^= x >> 3 x ^= (x << 1) & MAX_INT # cut-off Bignum overflow x ^= x >> 14 end else # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows def xorshift(x) x ^= x >> 1 x ^= (x << 1) & MAX_INT # cut-off Bignum overflow x ^= x >> 54 end end end end end thread_safe-0.3.5/lib/thread_safe/util/atomic_reference.rb0000644000004100000410000000235012530443654023631 0ustar www-datawww-datamodule ThreadSafe module Util AtomicReference = if defined?(Rubinius::AtomicReference) # An overhead-less atomic reference. Rubinius::AtomicReference else begin require 'atomic' defined?(Atomic::InternalReference) ? Atomic::InternalReference : Atomic rescue LoadError, NameError require 'thread' # get Mutex on 1.8 class FullLockingAtomicReference def initialize(value = nil) @___mutex = Mutex.new @___value = value end def get @___mutex.synchronize { @___value } end alias_method :value, :get def set(new_value) @___mutex.synchronize { @___value = new_value } end alias_method :value=, :set def compare_and_set(old_value, new_value) return false unless @___mutex.try_lock begin return false unless @___value.equal? old_value @___value = new_value ensure @___mutex.unlock end true end end FullLockingAtomicReference end end end end thread_safe-0.3.5/lib/thread_safe/util/volatile.rb0000644000004100000410000000404512530443654022161 0ustar www-datawww-datamodule ThreadSafe module Util module Volatile # Provides +volatile+ (in the JVM's sense) attribute accessors implemented # atop of the +AtomicReference+s. # # Usage: # class Foo # extend ThreadSafe::Util::Volatile # attr_volatile :foo, :bar # # def initialize(bar) # super() # must super() into parent initializers before using the volatile attribute accessors # self.bar = bar # end # # def hello # my_foo = foo # volatile read # self.foo = 1 # volatile write # cas_foo(1, 2) # => true | a strong CAS # end # end def attr_volatile(*attr_names) return if attr_names.empty? include(Module.new do atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = ThreadSafe::Util::AtomicReference.new"} initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" end class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def initialize(*) super #{atomic_ref_setup.join('; ')} end def initialize_copy(other) super #{initialize_copy_setup.join('; ')} end RUBY_EVAL attr_names.each do |attr_name| class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{attr_name} @__#{attr_name}.get end def #{attr_name}=(value) @__#{attr_name}.set(value) end def compare_and_set_#{attr_name}(old_value, new_value) @__#{attr_name}.compare_and_set(old_value, new_value) end RUBY_EVAL alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" end end) end end end end thread_safe-0.3.5/lib/thread_safe/util/volatile_tuple.rb0000644000004100000410000000200512530443654023364 0ustar www-datawww-datamodule ThreadSafe module Util # A fixed size array with volatile volatile getters/setters. 
# Usage: # arr = VolatileTuple.new(16) # arr.volatile_set(0, :foo) # arr.volatile_get(0) # => :foo # arr.cas(0, :foo, :bar) # => true # arr.volatile_get(0) # => :bar class VolatileTuple include Enumerable Tuple = defined?(Rubinius::Tuple) ? Rubinius::Tuple : Array def initialize(size) @tuple = tuple = Tuple.new(size) i = 0 while i < size tuple[i] = AtomicReference.new i += 1 end end def volatile_get(i) @tuple[i].get end def volatile_set(i, value) @tuple[i].set(value) end def compare_and_set(i, old_value, new_value) @tuple[i].compare_and_set(old_value, new_value) end alias_method :cas, :compare_and_set def size @tuple.size end def each @tuple.each {|ref| yield ref.get} end end end end thread_safe-0.3.5/lib/thread_safe/util/adder.rb0000644000004100000410000000425212530443654021421 0ustar www-datawww-datamodule ThreadSafe module Util # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 # available in public domain. # # Original source code available here: # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 # # One or more variables that together maintain an initially zero # sum. When updates (method +add+) are contended across threads, # the set of variables may grow dynamically to reduce contention. # Method +sum+ returns the current total combined across the # variables maintaining the sum. # # This class is usually preferable to single +Atomic+ reference when # multiple threads update a common sum that is used for purposes such # as collecting statistics, not for fine-grained synchronization # control. Under low update contention, the two classes have similar # characteristics. But under high contention, expected throughput of # this class is significantly higher, at the expense of higher space # consumption. class Adder < Striped64 # Adds the given value. def add(x) if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} was_uncontended = true hash = hash_code unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) retry_update(x, hash, was_uncontended) {|current_value| current_value + x} end end end def increment add(1) end def decrement add(-1) end # Returns the current sum. The returned value is _NOT_ an # atomic snapshot: Invocation in the absence of concurrent # updates returns an accurate result, but concurrent updates that # occur while the sum is being calculated might not be # incorporated. def sum x = base if current_cells = cells current_cells.each do |cell| x += cell.value if cell end end x end def reset internal_reset(0) end end end end thread_safe-0.3.5/lib/thread_safe/util/cheap_lockable.rb0000644000004100000410000000611012530443654023251 0ustar www-datawww-datamodule ThreadSafe module Util # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ # with the +ConditionVariable+ bundled in. # # Usage: # class A # include CheapLockable # # def do_exlusively # cheap_synchronize { yield } # end # # def wait_for_something # cheap_synchronize do # cheap_wait until resource_available? # do_something # cheap_broadcast # wake up others # end # end # end module CheapLockable private engine = defined?(RUBY_ENGINE) && RUBY_ENGINE if engine == 'rbx' # Making use of the Rubinius' ability to lock via object headers to avoid the overhead of the extra Mutex objects. 
def cheap_synchronize Rubinius.lock(self) begin yield ensure Rubinius.unlock(self) end end def cheap_wait wchan = Rubinius::Channel.new begin waiters = @waiters ||= [] waiters.push wchan Rubinius.unlock(self) signaled = wchan.receive_timeout nil ensure Rubinius.lock(self) unless signaled or waiters.delete(wchan) # we timed out, but got signaled afterwards (e.g. while waiting to # acquire @lock), so pass that signal on to the next waiter waiters.shift << true unless waiters.empty? end end self end def cheap_broadcast waiters = @waiters ||= [] waiters.shift << true until waiters.empty? self end elsif engine == 'jruby' # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects require 'jruby' def cheap_synchronize JRuby.reference0(self).synchronized { yield } end def cheap_wait JRuby.reference0(self).wait end def cheap_broadcast JRuby.reference0(self).notify_all end else require 'thread' extend Volatile attr_volatile :mutex # Non-reentrant Mutex#syncrhonize def cheap_synchronize true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) my_mutex.synchronize { yield } end # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. # Must only be called in +cheap_broadcast+'s block. def cheap_wait conditional_variable = @conditional_variable ||= ConditionVariable.new conditional_variable.wait(mutex) end # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. # Must only be called in +cheap_broadcast+'s block. def cheap_broadcast if conditional_variable = @conditional_variable conditional_variable.broadcast end end end end end end thread_safe-0.3.5/lib/thread_safe/atomic_reference_cache_backend.rb0000644000004100000410000010363412530443654025455 0ustar www-datawww-datamodule ThreadSafe # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 # available in public domain. # # Original source code available here: # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 # # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose # size exceeds a threshold). # # A hash table supporting full concurrency of retrievals and high expected # concurrency for updates. However, even though all operations are # thread-safe, retrieval operations do _not_ entail locking, and there is # _not_ any support for locking the entire table in a way that prevents all # access. # # Retrieval operations generally do not block, so may overlap with update # operations. Retrievals reflect the results of the most recently _completed_ # update operations holding upon their onset. (More formally, an update # operation for a given key bears a _happens-before_ relation with any (non # +nil+) retrieval for that key reporting the updated value.) For aggregate # operations such as +clear()+, concurrent retrievals may reflect insertion or # removal of only some entries. Similarly, the +each_pair+ iterator yields # elements reflecting the state of the hash table at some point at or since # the start of the +each_pair+. Bear in mind that the results of aggregate # status methods including +size()+ and +empty?+} are typically useful only # when a map is not undergoing concurrent updates in other threads. Otherwise # the results of these methods reflect transient states that may be adequate # for monitoring or estimation purposes, but not for program control. 
# # The table is dynamically expanded when there are too many collisions (i.e., # keys that have distinct hash codes but fall into the same slot modulo the # table size), with the expected average effect of maintaining roughly two # bins per mapping (corresponding to a 0.75 load factor threshold for # resizing). There may be much variance around this average as mappings are # added and removed, but overall, this maintains a commonly accepted # time/space tradeoff for hash tables. However, resizing this or any other # kind of hash table may be a relatively slow operation. When possible, it is # a good idea to provide a size estimate as an optional :initial_capacity # initializer argument. An additional optional :load_factor constructor # argument provides a further means of customizing initial table capacity by # specifying the table density to be used in calculating the amount of space # to allocate for the given number of elements. Note that using many keys with # exactly the same +hash+ is a sure way to slow down performance of any hash # table. # # ## Design overview # # The primary design goal of this hash table is to maintain concurrent # readability (typically method +[]+, but also iteration and related methods) # while minimizing update contention. Secondary goals are to keep space # consumption about the same or better than plain +Hash+, and to support high # initial insertion rates on an empty table by many threads. # # Each key-value mapping is held in a +Node+. The validation-based approach # explained below leads to a lot of code sprawl because retry-control # precludes factoring into smaller methods. # # The table is lazily initialized to a power-of-two size upon the first # insertion. Each bin in the table normally contains a list of +Node+s (most # often, the list has only zero or one +Node+). Table accesses require # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are # always accurately traversable under volatile reads, so long as lookups check # hash code and non-nullness of value before checking key equality. # # We use the top two bits of +Node+ hash fields for control purposes -- they # are available anyway because of addressing constraints. As explained further # below, these top bits are used as follows: # # - 00 - Normal # - 01 - Locked # - 11 - Locked and may have a thread waiting for lock # - 10 - +Node+ is a forwarding node # # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, # except for forwarding nodes, for which the lower bits are zero (and so # always have hash field == +MOVED+). # # Insertion (via +[]=+ or its variants) of the first node in an empty bin is # performed by just CASing it to the bin. This is by far the most common case # for put operations under most key/hash distributions. Other update # operations (insert, delete, and replace) require locks. We do not want to # waste the space required to associate a distinct lock object with each bin, # so instead use the first node of a bin list itself as a lock. Blocking # support for these locks relies +Util::CheapLockable. However, we also need a # +try_lock+ construction, so we overlay these by using bits of the +Node+ # hash field for lock control (see above), and so normally use builtin # monitors only for blocking and signalling using # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. 
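  #
  # As a concrete (and purely illustrative) reading of that encoding,
  # assuming +Util::FIXNUM_BIT_SIZE+ is 62 (a 64-bit MRI), so that
  # +bit_shift+ in +Node+ comes out as 60:
  #
  #   MOVED     == 0b10 << 60     # forwarding node
  #   LOCKED    == 0b01 << 60     # lock bit
  #   WAITING   == 0b11 << 60     # locked, with a waiter present
  #   HASH_BITS == (1 << 60) - 1  # mask for the key's real hash
  #
  # making +hash & LOCKED != 0+ the cheap "is this bin locked?" test used
  # throughout.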
# # Using the first node of a list as a lock does not by itself suffice though: # When a node is locked, any update must first validate that it is still the # first node after locking it, and retry if not. Because new nodes are always # appended to lists, once a node is first in a bin, it remains first until # deleted or the bin becomes invalidated (upon resizing). However, operations # that only conditionally update may inspect nodes until the point of update. # This is a converse of sorts to the lazy locking technique described by # Herlihy & Shavit. # # The main disadvantage of per-bin locks is that other update operations on # other nodes in a bin list protected by the same lock can stall, for example # when user +eql?+ or mapping functions take a long time. However, # statistically, under random hash codes, this is not a common problem. # Ideally, the frequency of nodes in bins follows a Poisson distribution # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of # about 0.5 on average, given the resizing threshold of 0.75, although with a # large variance because of resizing granularity. Ignoring variance, the # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / # factorial(k)). The first values are: # # - 0: 0.60653066 # - 1: 0.30326533 # - 2: 0.07581633 # - 3: 0.01263606 # - 4: 0.00157952 # - 5: 0.00015795 # - 6: 0.00001316 # - 7: 0.00000094 # - 8: 0.00000006 # - more: less than 1 in ten million # # Lock contention probability for two threads accessing distinct elements is # roughly 1 / (8 * #elements) under random hashes. # # The table is resized when occupancy exceeds a percentage threshold # (nominally, 0.75, but see below). Only a single thread performs the resize # (using field +size_control+, to arrange exclusion), but the table otherwise # remains usable for reads and updates. Resizing proceeds by transferring # bins, one by one, from the table to the next table. Because we are using # power-of-two expansion, the elements from each bin must either stay at same # index, or move with a power of two offset. We eliminate unnecessary node # creation by catching cases where old nodes can be reused because their next # fields won't change. On average, only about one-sixth of them need cloning # when a table doubles. The nodes they replace will be garbage collectable as # soon as they are no longer referenced by any reader thread that may be in # the midst of concurrently traversing table. Upon transfer, the old table bin # contains only a special forwarding node (with hash field +MOVED+) that # contains the next table as its key. On encountering a forwarding node, # access and update operations restart, using the new table. # # Each bin transfer requires its bin lock. However, unlike other cases, a # transfer can skip a bin if it fails to acquire its lock, and revisit it # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that # have been skipped because of failure to acquire a lock, and blocks only if # none are available (i.e., only very rarely). The transfer operation must # also ensure that all accessible bins in both the old and new table are # usable by any traversal. When there are no lock acquisition failures, this # is arranged simply by proceeding from the last bin (+table.size - 1+) up # towards the first. Upon seeing a forwarding node, traversals arrange to move # to the new table without revisiting nodes. 
However, when any node is skipped # during a transfer, all earlier table bins may have become visible, so are # initialized with a reverse-forwarding node back to the old table until the # new ones are established. (This sometimes requires transiently locking a # forwarding node, which is possible under the above encoding.) These more # expensive mechanics trigger only when necessary. # # The traversal scheme also applies to partial traversals of # ranges of bins (via an alternate Traverser constructor) # to support partitioned aggregate operations. Also, read-only # operations give up if ever forwarded to a null table, which # provides support for shutdown-style clearing, which is also not # currently implemented. # # Lazy table initialization minimizes footprint until first use. # # The element count is maintained using a +ThreadSafe::Util::Adder+, # which avoids contention on updates but can encounter cache thrashing # if read too frequently during concurrent access. To avoid reading so # often, resizing is attempted either when a bin lock is # contended, or upon adding to a bin already holding two or more # nodes (checked before adding in the +x_if_absent+ methods, after # adding in others). Under uniform hash distributions, the # probability of this occurring at threshold is around 13%, # meaning that only about 1 in 8 puts check threshold (and after # resizing, many fewer do so). But this approximation has high # variance for small table sizes, so we check on any collision # for sizes <= 64. The bulk putAll operation further reduces # contention by only committing count updates upon these size # checks. class AtomicReferenceCacheBackend class Table < Util::PowerOfTwoTuple def cas_new_node(i, hash, key, value) cas(i, nil, Node.new(hash, key, value)) end def try_to_cas_in_computed(i, hash, key) succeeded = false new_value = nil new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) if cas(i, nil, new_node) begin if NULL == (new_value = yield(NULL)) was_null = true else new_node.value = new_value end succeeded = true ensure volatile_set(i, nil) if !succeeded || was_null new_node.unlock_via_hash(locked_hash, hash) end end return succeeded, new_value end def try_lock_via_hash(i, node, node_hash) node.try_lock_via_hash(node_hash) do yield if volatile_get(i) == node end end def delete_node_at(i, node, predecessor_node) if predecessor_node predecessor_node.next = node.next else volatile_set(i, node.next) end end end # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do # not contain user keys or values. Otherwise, keys are never +nil+, and # +NULL+ +value+ fields indicate that a node is in the process of being # deleted or created. For purposes of read-only access, a key may be read # before a value, but can only be used after checking value to be +!= NULL+. class Node extend Util::Volatile attr_volatile :hash, :value, :next include Util::CheapLockable bit_shift = Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves # Encodings for special uses of Node hash fields. See above for explanation. MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash SPIN_LOCK_ATTEMPTS = Util::CPU_COUNT > 1 ? 
Util::CPU_COUNT * 2 : 0 attr_reader :key def initialize(hash, key, value, next_node = nil) super() @key = key self.lazy_set_hash(hash) self.lazy_set_value(value) self.next = next_node end # Spins a while if +LOCKED+ bit set and this node is the first of its bin, # and then sets +WAITING+ bits on hash field and blocks (once) if they are # still set. It is OK for this method to return even if lock is not # available upon exit, which enables these simple single-wait mechanics. # # The corresponding signalling operation is performed within callers: Upon # detecting that +WAITING+ has been set when unlocking lock (via a failed # CAS from non-waiting +LOCKED+ state), unlockers acquire the # +cheap_synchronize+ lock and perform a +cheap_broadcast+. def try_await_lock(table, i) if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? spins = SPIN_LOCK_ATTEMPTS randomizer = base_randomizer = Util::XorShiftRandom.get while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) if spins >= 0 if (randomizer = (randomizer >> 1)).even? # spin at random if (spins -= 1) == 0 Thread.pass # yield before blocking else randomizer = base_randomizer = Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? end end elsif cas_hash(my_hash, my_hash | WAITING) force_aquire_lock(table, i) break end end end end def key?(key) @key.eql?(key) end def matches?(key, hash) pure_hash == hash && key?(key) end def pure_hash hash & HASH_BITS end def try_lock_via_hash(node_hash = hash) if cas_hash(node_hash, locked_hash = node_hash | LOCKED) begin yield ensure unlock_via_hash(locked_hash, node_hash) end end end def locked? self.class.locked_hash?(hash) end def unlock_via_hash(locked_hash, node_hash) unless cas_hash(locked_hash, node_hash) self.hash = node_hash cheap_synchronize { cheap_broadcast } end end private def force_aquire_lock(table, i) cheap_synchronize do if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING cheap_wait else cheap_broadcast # possibly won race vs signaller end end end class << self def locked_hash?(hash) (hash & LOCKED) != 0 end end end # shorthands MOVED = Node::MOVED LOCKED = Node::LOCKED WAITING = Node::WAITING HASH_BITS = Node::HASH_BITS NOW_RESIZING = -1 DEFAULT_CAPACITY = 16 MAX_CAPACITY = Util::MAX_INT # The buffer size for skipped bins during transfers. The # value is arbitrary but should be large enough to avoid # most locking stalls during resizes. TRANSFER_BUFFER_SIZE = 32 extend Util::Volatile attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. # Table initialization and resizing control. When negative, the # table is being initialized or resized. Otherwise, when table is # null, holds the initial table size to use upon creation, or 0 # for default. After initialization, holds the next element count # value upon which to resize the table. :size_control def initialize(options = nil) super() @counter = Util::Adder.new initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? 
MAX_CAPACITY : capacity end def get_or_default(key, else_value = nil) hash = key_hash(key) current_table = table while current_table node = current_table.volatile_get_by_hash(hash) current_table = while node if (node_hash = node.hash) == MOVED break node.key elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) return value end node = node.next end end else_value end def [](key) get_or_default(key) end def key?(key) get_or_default(key, NULL) != NULL end def []=(key, value) get_and_set(key, value) value end def compute_if_absent(key) hash = key_hash(key) current_table = table || initialize_table while true if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } if succeeded increment_size return new_value end elsif (node_hash = node.hash) == MOVED current_table = node.key elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) return current_value elsif Node.locked_hash?(node_hash) try_await_lock(current_table, i, node) else succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } return value if succeeded end end end def compute_if_present(key) new_value = nil internal_replace(key) do |old_value| if (new_value = yield(NULL == old_value ? nil : old_value)).nil? NULL else new_value end end new_value end def compute(key) internal_compute(key) do |old_value| if (new_value = yield(NULL == old_value ? nil : old_value)).nil? NULL else new_value end end end def merge_pair(key, value) internal_compute(key) do |old_value| if NULL == old_value || !(value = yield(old_value)).nil? value else NULL end end end def replace_pair(key, old_value, new_value) NULL != internal_replace(key, old_value) { new_value } end def replace_if_exists(key, new_value) if (result = internal_replace(key) { new_value }) && NULL != result result end end def get_and_set(key, value) # internalPut in the original CHMV8 hash = key_hash(key) current_table = table || initialize_table while true if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) if current_table.cas_new_node(i, hash, key, value) increment_size break end elsif (node_hash = node.hash) == MOVED current_table = node.key elsif Node.locked_hash?(node_hash) try_await_lock(current_table, i, node) else succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) break old_value if succeeded end end end def delete(key) replace_if_exists(key, NULL) end def delete_pair(key, value) result = internal_replace(key, value) { NULL } if result && NULL != result !!result else false end end def each_pair return self unless current_table = table current_table_size = base_size = current_table.size i = base_index = 0 while base_index < base_size if node = current_table.volatile_get(i) if node.hash == MOVED current_table = node.key current_table_size = current_table.size else begin if NULL != (value = node.value) # skip deleted or special nodes yield node.key, value end end while node = node.next end end if (i_with_base = i + base_size) < current_table_size i = i_with_base # visit upper slots if present else i = base_index += 1 end end self end def size (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values end def empty? size == 0 end # Implementation for clear. Steps through each bin, removing all nodes. 
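    # Per-bin outcomes in the scan below, summarised (illustrative):
    #
    #   empty slot -> advance
    #   MOVED      -> follow the forwarder to the new table
    #   locked     -> flush the pending count, block on the bin lock
    #   otherwise  -> lock the bin, nil out live values, clear the slot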
def clear return self unless current_table = table current_table_size = current_table.size deleted_count = i = 0 while i < current_table_size if !(node = current_table.volatile_get(i)) i += 1 elsif (node_hash = node.hash) == MOVED current_table = node.key current_table_size = current_table.size elsif Node.locked_hash?(node_hash) decrement_size(deleted_count) # opportunistically update count deleted_count = 0 node.try_await_lock(current_table, i) else current_table.try_lock_via_hash(i, node, node_hash) do begin deleted_count += 1 if NULL != node.value # recheck under lock node.value = nil end while node = node.next current_table.volatile_set(i, nil) i += 1 end end end decrement_size(deleted_count) self end private # Internal versions of the insertion methods, each a # little more complicated than the last. All have # the same basic structure: # 1. If table uninitialized, create # 2. If bin empty, try to CAS new node # 3. If bin stale, use new table # 4. Lock and validate; if valid, scan and add or update # # The others interweave other checks and/or alternative actions: # * Plain +get_and_set+ checks for and performs resize after insertion. # * compute_if_absent prescans for mapping without lock (and fails to add # if present), which also makes pre-emptive resize checks worthwhile. # # Someday when details settle down a bit more, it might be worth # some factoring to reduce sprawl. def internal_replace(key, expected_old_value = NULL, &block) hash = key_hash(key) current_table = table while current_table if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) break elsif (node_hash = node.hash) == MOVED current_table = node.key elsif (node_hash & HASH_BITS) != hash && !node.next # precheck break # rules out possible existence elsif Node.locked_hash?(node_hash) try_await_lock(current_table, i, node) else succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) return old_value if succeeded end end NULL end def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) current_table.try_lock_via_hash(i, node, node_hash) do predecessor_node = nil old_value = NULL begin if node.matches?(key, hash) && NULL != (current_value = node.value) if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value old_value = current_value if NULL == (node.value = yield(old_value)) current_table.delete_node_at(i, node, predecessor_node) decrement_size end end break end predecessor_node = node end while node = node.next return true, old_value end end def find_value_in_node_list(node, key, hash, pure_hash) do_check_for_resize = false while true if pure_hash == hash && node.key?(key) && NULL != (value = node.value) return value elsif node = node.next do_check_for_resize = true # at least 2 nodes -> check for resize pure_hash = node.pure_hash else return NULL end end ensure check_for_resize if do_check_for_resize end def internal_compute(key, &block) hash = key_hash(key) current_table = table || initialize_table while true if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) if succeeded if NULL == new_value break nil else increment_size break new_value end end elsif (node_hash = node.hash) == MOVED current_table = node.key elsif Node.locked_hash?(node_hash) try_await_lock(current_table, i, node) else succeeded, new_value = 
attempt_compute(key, hash, current_table, i, node, node_hash, &block) break new_value if succeeded end end end def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) added = false current_table.try_lock_via_hash(i, node, node_hash) do while true if node.matches?(key, hash) && NULL != (value = node.value) return true, value end last = node unless node = node.next last.next = Node.new(hash, key, value = yield) added = true increment_size return true, value end end end ensure check_for_resize if added end def attempt_compute(key, hash, current_table, i, node, node_hash) added = false current_table.try_lock_via_hash(i, node, node_hash) do predecessor_node = nil while true if node.matches?(key, hash) && NULL != (value = node.value) if NULL == (node.value = value = yield(value)) current_table.delete_node_at(i, node, predecessor_node) decrement_size value = nil end return true, value end predecessor_node = node unless node = node.next if NULL == (value = yield(NULL)) value = nil else predecessor_node.next = Node.new(hash, key, value) added = true increment_size end return true, value end end end ensure check_for_resize if added end def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) node_nesting = nil current_table.try_lock_via_hash(i, node, node_hash) do node_nesting = 1 old_value = nil found_old_value = false while node if node.matches?(key, hash) && NULL != (old_value = node.value) found_old_value = true node.value = value break end last = node unless node = node.next last.next = Node.new(hash, key, value) break end node_nesting += 1 end return true, old_value if found_old_value increment_size true end ensure check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) end def initialize_copy(other) super @counter = Util::Adder.new self.table = nil self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY self end def try_await_lock(current_table, i, node) check_for_resize # try resizing if can't get lock node.try_await_lock(current_table, i) end def key_hash(key) key.hash & HASH_BITS end # Returns a power of two table size for the given desired capacity. def table_size_for(entry_count) size = 2 size <<= 1 while size < entry_count size end # Initializes table, using the size recorded in +size_control+. def initialize_table until current_table ||= table if (size_ctrl = size_control) == NOW_RESIZING Thread.pass # lost initialization race; just spin else try_in_resize_lock(current_table, size_ctrl) do initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY current_table = self.table = Table.new(initial_size) initial_size - (initial_size >> 2) # 75% load factor end end end current_table end # If table is too small and not already resizing, creates next table and # transfers bins. Rechecks occupancy after a transfer to see if another # resize is already needed because resizings are lagging additions. 
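    # A sketch of the threshold arithmetic used below: the resize block
    # returns (table_size << 1) - (table_size >> 1), i.e. 1.5 * table_size,
    # which is 75% of the doubled table. For example, a 64-bin table grows to
    # 128 bins with a new threshold of 128 - 32 = 96 entries before the next
    # resize is triggered.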
def check_for_resize while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum try_in_resize_lock(current_table, size_ctrl) do self.table = rebuild(current_table) (table_size << 1) - (table_size >> 1) # 75% load factor end end end def try_in_resize_lock(current_table, size_ctrl) if cas_size_control(size_ctrl, NOW_RESIZING) begin if current_table == table # recheck under lock size_ctrl = yield # get new size_control end ensure self.size_control = size_ctrl end end end # Moves and/or copies the nodes in each bin to new table. See above for explanation. def rebuild(table) old_table_size = table.size new_table = table.next_in_size_table # puts "#{old_table_size} -> #{new_table.size}" forwarder = Node.new(MOVED, new_table, NULL) rev_forwarder = nil locked_indexes = nil # holds bins to revisit; nil until needed locked_arr_idx = 0 bin = old_table_size - 1 i = bin while true if !(node = table.volatile_get(i)) # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table redo unless (bin >= 0 ? table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) elsif Node.locked_hash?(node_hash = node.hash) locked_indexes ||= Array.new if bin < 0 && locked_arr_idx > 0 locked_arr_idx -= 1 i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin redo end if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE node.try_await_lock(table, i) # no other options -- block redo end rev_forwarder ||= Node.new(MOVED, table, NULL) redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list locked_indexes << i new_table.volatile_set(i, rev_forwarder) new_table.volatile_set(i + old_table_size, rev_forwarder) else redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) end if bin > 0 i = (bin -= 1) elsif locked_indexes && !locked_indexes.empty? bin = -1 i = locked_indexes.pop locked_arr_idx = locked_indexes.size - 1 else return new_table end end end def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) # transiently use a locked forwarding node locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) if old_table.cas(i, nil, locked_forwarder) new_table.volatile_set(i, nil) # kill the potential reverse forwarders new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders old_table.volatile_set(i, forwarder) locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) true end end # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. 
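    # In a doubling resize a node in old bin +i+ can only move to bin +i+
    # (its hash bit for the new size is 0, the "low" list) or to bin
    # +i + old_size+ (bit is 1, the "high" list). +split_bin+ below exploits
    # this: it finds the trailing run of nodes that all land in the same half
    # and reuses that sublist as-is, copying only the nodes in front of it
    # into fresh Node objects.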
def split_old_bin(table, new_table, i, node, node_hash, forwarder) table.try_lock_via_hash(i, node, node_hash) do split_bin(new_table, i, node, node_hash) table.volatile_set(i, forwarder) end end def split_bin(new_table, i, node, node_hash) bit = new_table.size >> 1 # bit to split on run_bit = node_hash & bit last_run = nil low = nil high = nil current_node = node # this optimises for the lowest amount of volatile writes and objects created while current_node = current_node.next unless (b = current_node.hash & bit) == run_bit run_bit = b last_run = current_node end end if run_bit == 0 low = last_run else high = last_run end current_node = node until current_node == last_run pure_hash = current_node.pure_hash if (pure_hash & bit) == 0 low = Node.new(pure_hash, current_node.key, current_node.value, low) else high = Node.new(pure_hash, current_node.key, current_node.value, high) end current_node = current_node.next end new_table.volatile_set(i, low) new_table.volatile_set(i + bit, high) end def increment_size @counter.increment end def decrement_size(by = 1) @counter.add(-by) end end end thread_safe-0.3.5/lib/thread_safe/util.rb0000644000004100000410000000127412530443654020343 0ustar www-datawww-datamodule ThreadSafe module Util FIXNUM_BIT_SIZE = (0.size * 8) - 2 MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 CPU_COUNT = 16 # is there a way to determine this? autoload :AtomicReference, 'thread_safe/util/atomic_reference' autoload :Adder, 'thread_safe/util/adder' autoload :CheapLockable, 'thread_safe/util/cheap_lockable' autoload :PowerOfTwoTuple, 'thread_safe/util/power_of_two_tuple' autoload :Striped64, 'thread_safe/util/striped64' autoload :Volatile, 'thread_safe/util/volatile' autoload :VolatileTuple, 'thread_safe/util/volatile_tuple' autoload :XorShiftRandom, 'thread_safe/util/xor_shift_random' end end thread_safe-0.3.5/lib/thread_safe/synchronized_delegator.rb0000644000004100000410000000315212530443654024130 0ustar www-datawww-datarequire 'delegate' require 'monitor' # This class provides a trivial way to synchronize all calls to a given object # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls # around the delegated `#send`. Example: # # array = [] # not thread-safe on many impls # array = SynchronizedDelegator.new([]) # thread-safe # # A simple `Monitor` provides a very coarse-grained way to synchronize a given # object, in that it will cause synchronization for methods that have no need # for it, but this is a trivial way to get thread-safety where none may exist # currently on some implementations. # # This class is currently being considered for inclusion into stdlib, via # https://bugs.ruby-lang.org/issues/8556 class SynchronizedDelegator < SimpleDelegator def setup @old_abort = Thread.abort_on_exception Thread.abort_on_exception = true end def teardown Thread.abort_on_exception = @old_abort end def initialize(obj) __setobj__(obj) @monitor = Monitor.new end def method_missing(method, *args, &block) monitor = @monitor begin monitor.enter super ensure monitor.exit end end # Work-around for 1.8 std-lib not passing block around to delegate. 
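# (SimpleDelegator#method_missing on 1.8 forwards the positional arguments
# but not the block, so the variant below re-sends the block to the wrapped
# object explicitly instead of relying on +super+ -- e.g. so that
# SynchronizedDelegator.new([]).map {|x| x} still passes its block through
# to Array#map.)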
# @private def method_missing(method, *args, &block) monitor = @monitor begin monitor.enter target = self.__getobj__ if target.respond_to?(method) target.__send__(method, *args, &block) else super(method, *args, &block) end ensure monitor.exit end end if RUBY_VERSION[0, 3] == '1.8' end unless defined?(SynchronizedDelegator) thread_safe-0.3.5/metadata.yml0000644000004100000410000000772512530443654016340 0ustar www-datawww-data--- !ruby/object:Gem::Specification name: thread_safe version: !ruby/object:Gem::Version version: 0.3.5 platform: ruby authors: - Charles Oliver Nutter - thedarkone autorequire: bindir: bin cert_chain: [] date: 2015-03-11 00:00:00.000000000 Z dependencies: - !ruby/object:Gem::Dependency name: atomic requirement: !ruby/object:Gem::Requirement requirements: - - '=' - !ruby/object:Gem::Version version: 1.1.16 type: :development prerelease: false version_requirements: !ruby/object:Gem::Requirement requirements: - - '=' - !ruby/object:Gem::Version version: 1.1.16 - !ruby/object:Gem::Dependency name: rake requirement: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' type: :development prerelease: false version_requirements: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' - !ruby/object:Gem::Dependency name: minitest requirement: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '4' type: :development prerelease: false version_requirements: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '4' description: Thread-safe collections and utilities for Ruby email: - headius@headius.com - thedarkone2@gmail.com executables: [] extensions: [] extra_rdoc_files: [] files: - ".travis.yml" - ".yardopts" - Gemfile - LICENSE - README.md - Rakefile - examples/bench_cache.rb - ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java - ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java - ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java - ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java - ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java - ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java - ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java - ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java - ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java - ext/thread_safe/JrubyCacheBackendService.java - lib/thread_safe.rb - lib/thread_safe/atomic_reference_cache_backend.rb - lib/thread_safe/cache.rb - lib/thread_safe/mri_cache_backend.rb - lib/thread_safe/non_concurrent_cache_backend.rb - lib/thread_safe/synchronized_cache_backend.rb - lib/thread_safe/synchronized_delegator.rb - lib/thread_safe/util.rb - lib/thread_safe/util/adder.rb - lib/thread_safe/util/atomic_reference.rb - lib/thread_safe/util/cheap_lockable.rb - lib/thread_safe/util/power_of_two_tuple.rb - lib/thread_safe/util/striped64.rb - lib/thread_safe/util/volatile.rb - lib/thread_safe/util/volatile_tuple.rb - lib/thread_safe/util/xor_shift_random.rb - lib/thread_safe/version.rb - tasks/update_doc.rake - test/src/thread_safe/SecurityManager.java - test/test_array.rb - test/test_cache.rb - test/test_cache_loops.rb - test/test_hash.rb - test/test_helper.rb - test/test_synchronized_delegator.rb - thread_safe.gemspec - yard-template/default/fulldoc/html/css/common.css - yard-template/default/layout/html/footer.erb homepage: https://github.com/ruby-concurrency/thread_safe licenses: - 
Apache-2.0 metadata: {} post_install_message: rdoc_options: [] require_paths: - lib required_ruby_version: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' required_rubygems_version: !ruby/object:Gem::Requirement requirements: - - ">=" - !ruby/object:Gem::Version version: '0' requirements: [] rubyforge_project: rubygems_version: 2.4.6 signing_key: specification_version: 4 summary: A collection of data structures and utilities to make thread-safe programming in Ruby easier test_files: - test/src/thread_safe/SecurityManager.java - test/test_array.rb - test/test_cache.rb - test/test_cache_loops.rb - test/test_hash.rb - test/test_helper.rb - test/test_synchronized_delegator.rb has_rdoc: thread_safe-0.3.5/test/0000755000004100000410000000000012530443654015001 5ustar www-datawww-datathread_safe-0.3.5/test/test_hash.rb0000644000004100000410000000056712530443654017320 0ustar www-datawww-datarequire 'thread_safe' require File.join(File.dirname(__FILE__), "test_helper") class TestHash < Minitest::Test def test_concurrency hsh = ThreadSafe::Hash.new (1..THREADS).map do |i| Thread.new do 1000.times do |j| hsh[i*1000+j] = i hsh[i*1000+j] hsh.delete(i*1000+j) end end end.map(&:join) end end thread_safe-0.3.5/test/test_cache.rb0000644000004100000410000006070112530443654017434 0ustar www-datawww-datarequire 'thread_safe' require 'thread' require File.join(File.dirname(__FILE__), "test_helper") Thread.abort_on_exception = true class TestCache < Minitest::Test def setup @cache = ThreadSafe::Cache.new end def test_concurrency cache = @cache (1..THREADS).map do |i| Thread.new do 1000.times do |j| key = i*1000+j cache[key] = i cache[key] cache.delete(key) end end end.map(&:join) end def test_retrieval assert_size_change 1 do assert_equal nil, @cache[:a] assert_equal nil, @cache.get(:a) @cache[:a] = 1 assert_equal 1, @cache[:a] assert_equal 1, @cache.get(:a) end end def test_put_if_absent with_or_without_default_proc do assert_size_change 1 do assert_equal nil, @cache.put_if_absent(:a, 1) assert_equal 1, @cache.put_if_absent(:a, 1) assert_equal 1, @cache.put_if_absent(:a, 2) assert_equal 1, @cache[:a] end end end def test_compute_if_absent with_or_without_default_proc do assert_size_change 3 do assert_equal(1, (@cache.compute_if_absent(:a) {1})) assert_equal(1, (@cache.compute_if_absent(:a) {2})) assert_equal 1, @cache[:a] @cache[:b] = nil assert_equal(nil, (@cache.compute_if_absent(:b) {1})) assert_equal(nil, (@cache.compute_if_absent(:c) {})) assert_equal nil, @cache[:c] assert_equal true, @cache.key?(:c) end end end def test_compute_if_absent_with_return with_or_without_default_proc { assert_handles_return_lambda(:compute_if_absent, :a) } end def test_compute_if_absent_exception with_or_without_default_proc { assert_handles_exception(:compute_if_absent, :a) } end def test_compute_if_absent_atomicity late_compute_threads_count = 10 late_put_if_absent_threads_count = 10 getter_threads_count = 5 compute_started = ThreadSafe::Test::Latch.new(1) compute_proceed = ThreadSafe::Test::Latch.new(late_compute_threads_count + late_put_if_absent_threads_count + getter_threads_count) block_until_compute_started = lambda do |name| if (v = @cache[:a]) != nil assert_equal nil, v end compute_proceed.release compute_started.await end assert_size_change 1 do late_compute_threads = Array.new(late_compute_threads_count) do Thread.new do block_until_compute_started.call('compute_if_absent') assert_equal(1, (@cache.compute_if_absent(:a) { flunk })) end end late_put_if_absent_threads = 
Array.new(late_put_if_absent_threads_count) do Thread.new do block_until_compute_started.call('put_if_absent') assert_equal(1, @cache.put_if_absent(:a, 2)) end end getter_threads = Array.new(getter_threads_count) do Thread.new do block_until_compute_started.call('getter') Thread.pass while @cache[:a].nil? assert_equal 1, @cache[:a] end end Thread.new do @cache.compute_if_absent(:a) do compute_started.release compute_proceed.await sleep(0.2) 1 end end.join (late_compute_threads + late_put_if_absent_threads + getter_threads).each(&:join) end end def test_compute_if_present with_or_without_default_proc do assert_no_size_change do assert_equal(nil, @cache.compute_if_present(:a) {}) assert_equal(nil, @cache.compute_if_present(:a) {1}) assert_equal(nil, @cache.compute_if_present(:a) {flunk}) assert_equal false, @cache.key?(:a) end @cache[:a] = 1 assert_no_size_change do assert_equal(1, @cache.compute_if_present(:a) {1}) assert_equal(1, @cache[:a]) assert_equal(2, @cache.compute_if_present(:a) {2}) assert_equal(2, @cache[:a]) assert_equal(false, @cache.compute_if_present(:a) {false}) assert_equal(false, @cache[:a]) @cache[:a] = 1 yielded = false @cache.compute_if_present(:a) do |old_value| yielded = true assert_equal 1, old_value 2 end assert yielded end assert_size_change -1 do assert_equal(nil, @cache.compute_if_present(:a) {}) assert_equal(false, @cache.key?(:a)) assert_equal(nil, @cache.compute_if_present(:a) {1}) assert_equal(false, @cache.key?(:a)) end end end def test_compute_if_present_with_return with_or_without_default_proc do @cache[:a] = 1 assert_handles_return_lambda(:compute_if_present, :a) end end def test_compute_if_present_exception with_or_without_default_proc do @cache[:a] = 1 assert_handles_exception(:compute_if_present, :a) end end def test_compute with_or_without_default_proc do assert_no_size_change do assert_compute(:a, nil, nil) {} end assert_size_change 1 do assert_compute(:a, nil, 1) {1} assert_compute(:a, 1, 2) {2} assert_compute(:a, 2, false) {false} assert_equal false, @cache[:a] end assert_size_change -1 do assert_compute(:a, false, nil) {} end end end def test_compute_with_return with_or_without_default_proc do assert_handles_return_lambda(:compute, :a) @cache[:a] = 1 assert_handles_return_lambda(:compute, :a) end end def test_compute_exception with_or_without_default_proc do assert_handles_exception(:compute, :a) @cache[:a] = 1 assert_handles_exception(:compute, :a) end end def test_merge_pair with_or_without_default_proc do assert_size_change 1 do assert_equal(nil, @cache.merge_pair(:a, nil) {flunk}) assert_equal true, @cache.key?(:a) assert_equal nil, @cache[:a] end assert_no_size_change do assert_merge_pair(:a, nil, nil, false) {false} assert_merge_pair(:a, nil, false, 1) {1} assert_merge_pair(:a, nil, 1, 2) {2} end assert_size_change -1 do assert_merge_pair(:a, nil, 2, nil) {} assert_equal false, @cache.key?(:a) end end end def test_merge_pair_with_return with_or_without_default_proc do @cache[:a] = 1 assert_handles_return_lambda(:merge_pair, :a, 2) end end def test_merge_pair_exception with_or_without_default_proc do @cache[:a] = 1 assert_handles_exception(:merge_pair, :a, 2) end end def test_updates_dont_block_reads getters_count = 20 key_klass = ThreadSafe::Test::HashCollisionKey keys = [key_klass.new(1, 100), key_klass.new(2, 100), key_klass.new(3, 100)] # hash colliding keys inserted_keys = [] keys.each do |key, i| compute_started = ThreadSafe::Test::Latch.new(1) compute_finished = ThreadSafe::Test::Latch.new(1) getters_started = 
ThreadSafe::Test::Latch.new(getters_count) getters_finished = ThreadSafe::Test::Latch.new(getters_count) computer_thread = Thread.new do getters_started.await @cache.compute_if_absent(key) do compute_started.release getters_finished.await 1 end compute_finished.release end getter_threads = (1..getters_count).map do Thread.new do getters_started.release inserted_keys.each do |inserted_key| assert_equal true, @cache.key?(inserted_key) assert_equal 1, @cache[inserted_key] end assert_equal false, @cache.key?(key) compute_started.await inserted_keys.each do |inserted_key| assert_equal true, @cache.key?(inserted_key) assert_equal 1, @cache[inserted_key] end assert_equal false, @cache.key?(key) assert_equal nil, @cache[key] getters_finished.release compute_finished.await assert_equal true, @cache.key?(key) assert_equal 1, @cache[key] end end (getter_threads << computer_thread).map {|t| assert(t.join(2))} # asserting no deadlocks inserted_keys << key end end def test_collision_resistance assert_collision_resistance((0..1000).map {|i| ThreadSafe::Test::HashCollisionKey(i, 1)}) end def test_collision_resistance_with_arrays special_array_class = Class.new(Array) do def key # assert_collision_resistance expects to be able to call .key to get the "real" key first.key end end # Test collision resistance with a keys that say they responds_to <=>, but then raise exceptions # when actually called (ie: an Array filled with non-comparable keys). # See https://github.com/headius/thread_safe/issues/19 for more info. assert_collision_resistance((0..100).map do |i| special_array_class.new([ThreadSafe::Test::HashCollisionKeyNonComparable.new(i, 1)]) end) end def test_replace_pair with_or_without_default_proc do assert_no_size_change do assert_equal false, @cache.replace_pair(:a, 1, 2) assert_equal false, @cache.replace_pair(:a, nil, nil) assert_equal false, @cache.key?(:a) end @cache[:a] = 1 assert_no_size_change do assert_equal true, @cache.replace_pair(:a, 1, 2) assert_equal false, @cache.replace_pair(:a, 1, 2) assert_equal 2, @cache[:a] assert_equal true, @cache.replace_pair(:a, 2, 2) assert_equal 2, @cache[:a] assert_equal true, @cache.replace_pair(:a, 2, nil) assert_equal false, @cache.replace_pair(:a, 2, nil) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) assert_equal true, @cache.replace_pair(:a, nil, nil) assert_equal true, @cache.key?(:a) assert_equal true, @cache.replace_pair(:a, nil, 1) assert_equal 1, @cache[:a] end end end def test_replace_if_exists with_or_without_default_proc do assert_no_size_change do assert_equal nil, @cache.replace_if_exists(:a, 1) assert_equal false, @cache.key?(:a) end @cache[:a] = 1 assert_no_size_change do assert_equal 1, @cache.replace_if_exists(:a, 2) assert_equal 2, @cache[:a] assert_equal 2, @cache.replace_if_exists(:a, nil) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) assert_equal nil, @cache.replace_if_exists(:a, 1) assert_equal 1, @cache[:a] end end end def test_get_and_set with_or_without_default_proc do assert_size_change 1 do assert_equal nil, @cache.get_and_set(:a, 1) assert_equal true, @cache.key?(:a) assert_equal 1, @cache[:a] assert_equal 1, @cache.get_and_set(:a, 2) assert_equal 2, @cache.get_and_set(:a, nil) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) assert_equal nil, @cache.get_and_set(:a, 1) assert_equal 1, @cache[:a] end end end def test_key with_or_without_default_proc do assert_equal nil, @cache.key(1) @cache[:a] = 1 assert_equal :a, @cache.key(1) assert_equal nil, @cache.key(0) assert_equal :a, 
@cache.index(1) if RUBY_VERSION =~ /1\.8/ end end def test_key? with_or_without_default_proc do assert_equal false, @cache.key?(:a) @cache[:a] = 1 assert_equal true, @cache.key?(:a) end end def test_value? with_or_without_default_proc do assert_equal false, @cache.value?(1) @cache[:a] = 1 assert_equal true, @cache.value?(1) end end def test_delete with_or_without_default_proc do |default_proc_set| assert_no_size_change do assert_equal nil, @cache.delete(:a) end @cache[:a] = 1 assert_size_change -1 do assert_equal 1, @cache.delete(:a) end assert_no_size_change do assert_equal nil, @cache[:a] unless default_proc_set assert_equal false, @cache.key?(:a) assert_equal nil, @cache.delete(:a) end end end def test_delete_pair with_or_without_default_proc do assert_no_size_change do assert_equal false, @cache.delete_pair(:a, 2) assert_equal false, @cache.delete_pair(:a, nil) end @cache[:a] = 1 assert_no_size_change do assert_equal false, @cache.delete_pair(:a, 2) end assert_size_change -1 do assert_equal 1, @cache[:a] assert_equal true, @cache.delete_pair(:a, 1) assert_equal false, @cache.delete_pair(:a, 1) assert_equal false, @cache.key?(:a) end end end def test_default_proc @cache = cache_with_default_proc(1) assert_no_size_change do assert_equal false, @cache.key?(:a) end assert_size_change 1 do assert_equal 1, @cache[:a] assert_equal true, @cache.key?(:a) end end def test_falsy_default_proc @cache = cache_with_default_proc(nil) assert_no_size_change do assert_equal false, @cache.key?(:a) end assert_size_change 1 do assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) end end def test_fetch with_or_without_default_proc do |default_proc_set| assert_no_size_change do assert_equal 1, @cache.fetch(:a, 1) assert_equal(1, (@cache.fetch(:a) {1})) assert_equal false, @cache.key?(:a) assert_equal nil, @cache[:a] unless default_proc_set end @cache[:a] = 1 assert_no_size_change do assert_equal(1, (@cache.fetch(:a) {flunk})) end assert_raises(ThreadSafe::Cache::KEY_ERROR) do @cache.fetch(:b) end assert_no_size_change do assert_equal 1, (@cache.fetch(:b, :c) {1}) # assert block supersedes default value argument assert_equal false, @cache.key?(:b) end end end def test_falsy_fetch with_or_without_default_proc do assert_equal false, @cache.key?(:a) assert_no_size_change do assert_equal(nil, @cache.fetch(:a, nil)) assert_equal(false, @cache.fetch(:a, false)) assert_equal(nil, (@cache.fetch(:a) {})) assert_equal(false, (@cache.fetch(:a) {false})) end @cache[:a] = nil assert_no_size_change do assert_equal true, @cache.key?(:a) assert_equal(nil, (@cache.fetch(:a) {flunk})) end end end def test_fetch_with_return with_or_without_default_proc do r = lambda do @cache.fetch(:a) { return 10 } end.call assert_no_size_change do assert_equal 10, r assert_equal false, @cache.key?(:a) end end end def test_fetch_or_store with_or_without_default_proc do |default_proc_set| assert_size_change 1 do assert_equal 1, @cache.fetch_or_store(:a, 1) assert_equal 1, @cache[:a] end @cache.delete(:a) assert_size_change 1 do assert_equal 1, (@cache.fetch_or_store(:a) {1}) assert_equal 1, @cache[:a] end assert_no_size_change do assert_equal(1, (@cache.fetch_or_store(:a) {flunk})) end assert_raises(ThreadSafe::Cache::KEY_ERROR) do @cache.fetch_or_store(:b) end assert_size_change 1 do assert_equal 1, (@cache.fetch_or_store(:b, :c) {1}) # assert block supersedes default value argument assert_equal 1, @cache[:b] end end end def test_falsy_fetch_or_store with_or_without_default_proc do assert_equal false, @cache.key?(:a) 
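      # unlike #fetch, #fetch_or_store writes the falsy default (or block
      # result) back into the cache, so each assert_size_change block below
      # grows the size by one and makes key?(:a) true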
assert_size_change 1 do assert_equal(nil, @cache.fetch_or_store(:a, nil)) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) end @cache.delete(:a) assert_size_change 1 do assert_equal(false, @cache.fetch_or_store(:a, false)) assert_equal false, @cache[:a] assert_equal true, @cache.key?(:a) end @cache.delete(:a) assert_size_change 1 do assert_equal(nil, (@cache.fetch_or_store(:a) {})) assert_equal nil, @cache[:a] assert_equal true, @cache.key?(:a) end @cache.delete(:a) assert_size_change 1 do assert_equal(false, (@cache.fetch_or_store(:a) {false})) assert_equal false, @cache[:a] assert_equal true, @cache.key?(:a) end @cache[:a] = nil assert_no_size_change do assert_equal(nil, (@cache.fetch_or_store(:a) {flunk})) end end end def test_fetch_or_store_with_return with_or_without_default_proc do r = lambda do @cache.fetch_or_store(:a) { return 10 } end.call assert_no_size_change do assert_equal 10, r assert_equal false, @cache.key?(:a) end end end def test_clear @cache[:a] = 1 assert_size_change -1 do assert_equal @cache, @cache.clear assert_equal false, @cache.key?(:a) assert_equal nil, @cache[:a] end end def test_each_pair @cache.each_pair {|k, v| flunk} assert_equal(@cache, (@cache.each_pair {})) @cache[:a] = 1 h = {} @cache.each_pair {|k, v| h[k] = v} assert_equal({:a => 1}, h) @cache[:b] = 2 h = {} @cache.each_pair {|k, v| h[k] = v} assert_equal({:a => 1, :b => 2}, h) end def test_each_pair_iterator @cache[:a] = 1 @cache[:b] = 2 i = 0 r = @cache.each_pair do |k, v| if i == 0 i += 1 next flunk elsif i == 1 break :breaked end end assert_equal :breaked, r end def test_each_pair_allows_modification @cache[:a] = 1 @cache[:b] = 1 @cache[:c] = 1 assert_size_change 1 do @cache.each_pair do |k, v| @cache[:z] = 1 end end end def test_keys assert_equal [], @cache.keys @cache[1] = 1 assert_equal [1], @cache.keys @cache[2] = 2 assert_equal [1, 2], @cache.keys.sort end def test_values assert_equal [], @cache.values @cache[1] = 1 assert_equal [1], @cache.values @cache[2] = 2 assert_equal [1, 2], @cache.values.sort end def test_each_key assert_equal(@cache, (@cache.each_key {flunk})) @cache[1] = 1 arr = [] @cache.each_key {|k| arr << k} assert_equal [1], arr @cache[2] = 2 arr = [] @cache.each_key {|k| arr << k} assert_equal [1, 2], arr.sort end def test_each_value assert_equal(@cache, (@cache.each_value {flunk})) @cache[1] = 1 arr = [] @cache.each_value {|k| arr << k} assert_equal [1], arr @cache[2] = 2 arr = [] @cache.each_value {|k| arr << k} assert_equal [1, 2], arr.sort end def test_empty assert_equal true, @cache.empty? @cache[:a] = 1 assert_equal false, @cache.empty? 
end def test_options_validation assert_valid_options(nil) assert_valid_options({}) assert_valid_options(:foo => :bar) end def test_initial_capacity_options_validation assert_valid_option(:initial_capacity, nil) assert_valid_option(:initial_capacity, 1) assert_invalid_option(:initial_capacity, '') assert_invalid_option(:initial_capacity, 1.0) assert_invalid_option(:initial_capacity, -1) end def test_load_factor_options_validation assert_valid_option(:load_factor, nil) assert_valid_option(:load_factor, 0.01) assert_valid_option(:load_factor, 0.75) assert_valid_option(:load_factor, 1) assert_invalid_option(:load_factor, '') assert_invalid_option(:load_factor, 0) assert_invalid_option(:load_factor, 1.1) assert_invalid_option(:load_factor, 2) assert_invalid_option(:load_factor, -1) end def test_size assert_equal 0, @cache.size @cache[:a] = 1 assert_equal 1, @cache.size @cache[:b] = 1 assert_equal 2, @cache.size @cache.delete(:a) assert_equal 1, @cache.size @cache.delete(:b) assert_equal 0, @cache.size end def test_get_or_default with_or_without_default_proc do assert_equal 1, @cache.get_or_default(:a, 1) assert_equal nil, @cache.get_or_default(:a, nil) assert_equal false, @cache.get_or_default(:a, false) assert_equal false, @cache.key?(:a) @cache[:a] = 1 assert_equal 1, @cache.get_or_default(:a, 2) end end def test_dup_clone [:dup, :clone].each do |meth| cache = cache_with_default_proc(:default_value) cache[:a] = 1 dupped = cache.send(meth) assert_equal 1, dupped[:a] assert_equal 1, dupped.size assert_size_change 1, cache do assert_no_size_change dupped do cache[:b] = 1 end end assert_equal false, dupped.key?(:b) assert_no_size_change cache do assert_size_change -1, dupped do dupped.delete(:a) end end assert_equal false, dupped.key?(:a) assert_equal true, cache.key?(:a) # test default proc assert_size_change 1, cache do assert_no_size_change dupped do assert_equal :default_value, cache[:c] assert_equal false, dupped.key?(:c) end end assert_no_size_change cache do assert_size_change 1, dupped do assert_equal :default_value, dupped[:d] assert_equal false, cache.key?(:d) end end end end def test_is_unfreezable assert_raises(NoMethodError) { @cache.freeze } end def test_marshal_dump_load new_cache = Marshal.load(Marshal.dump(@cache)) assert_instance_of ThreadSafe::Cache, new_cache assert_equal 0, new_cache.size @cache[:a] = 1 new_cache = Marshal.load(Marshal.dump(@cache)) assert_equal 1, @cache[:a] assert_equal 1, new_cache.size end def test_marshal_dump_doesnt_work_with_default_proc assert_raises(TypeError) do Marshal.dump(ThreadSafe::Cache.new {}) end end private def with_or_without_default_proc yield false @cache = ThreadSafe::Cache.new {|h, k| h[k] = :default_value} yield true end def cache_with_default_proc(default_value = 1) ThreadSafe::Cache.new {|cache, k| cache[k] = default_value} end def assert_valid_option(option_name, value) assert_valid_options(option_name => value) end def assert_valid_options(options) c = ThreadSafe::Cache.new(options) assert_instance_of ThreadSafe::Cache, c end def assert_invalid_option(option_name, value) assert_invalid_options(option_name => value) end def assert_invalid_options(options) assert_raises(ArgumentError) { ThreadSafe::Cache.new(options) } end def assert_size_change(change, cache = @cache) start = cache.size yield assert_equal change, cache.size - start end def assert_no_size_change(cache = @cache, &block) assert_size_change(0, cache, &block) end def assert_handles_return_lambda(method, key, *args) before_had_key = @cache.key?(key) before_had_value = 
before_had_key ? @cache[key] : nil returning_lambda = lambda do @cache.send(method, key, *args) { return :direct_return } end assert_no_size_change do assert_equal(:direct_return, returning_lambda.call) assert_equal before_had_key, @cache.key?(key) assert_equal before_had_value, @cache[key] if before_had_value end end class TestException < Exception; end def assert_handles_exception(method, key, *args) before_had_key = @cache.key?(key) before_had_value = before_had_key ? @cache[key] : nil assert_no_size_change do assert_raises(TestException) do @cache.send(method, key, *args) { raise TestException, '' } end assert_equal before_had_key, @cache.key?(key) assert_equal before_had_value, @cache[key] if before_had_value end end def assert_compute(key, expected_old_value, expected_result) result = @cache.compute(key) do |old_value| assert_equal expected_old_value, old_value yield end assert_equal expected_result, result end def assert_merge_pair(key, value, expected_old_value, expected_result) result = @cache.merge_pair(key, value) do |old_value| assert_equal expected_old_value, old_value yield end assert_equal expected_result, result end def assert_collision_resistance(keys) keys.each {|k| @cache[k] = k.key} 10.times do |i| size = keys.size while i < size k = keys[i] assert(k.key == @cache.delete(k) && !@cache.key?(k) && (@cache[k] = k.key; @cache[k] == k.key)) i += 10 end end assert(keys.all? {|k| @cache[k] == k.key}) end end thread_safe-0.3.5/test/test_helper.rb0000644000004100000410000000621612530443654017651 0ustar www-datawww-dataunless defined?(JRUBY_VERSION) require 'simplecov' require 'coveralls' SimpleCov.formatter = SimpleCov::Formatter::MultiFormatter[ SimpleCov::Formatter::HTMLFormatter, Coveralls::SimpleCov::Formatter ] SimpleCov.start do project_name 'thread_safe' add_filter '/examples/' add_filter '/pkg/' add_filter '/test/' add_filter '/tasks/' add_filter '/yard-template/' add_filter '/yardoc/' command_name 'Minitest' end end require 'minitest/autorun' require 'minitest/reporters' Minitest::Reporters.use! Minitest::Reporters::SpecReporter.new(color: true) require 'thread' require 'thread_safe' THREADS = (RUBY_ENGINE == 'ruby' ? 
100 : 10) if defined?(JRUBY_VERSION) && ENV['TEST_NO_UNSAFE'] # to be used like this: rake test TEST_NO_UNSAFE=true load 'test/package.jar' java_import 'thread_safe.SecurityManager' manager = SecurityManager.new # Prevent accessing internal classes manager.deny java.lang.RuntimePermission.new("accessClassInPackage.sun.misc") java.lang.System.setSecurityManager manager class TestNoUnsafe < Minitest::Test def test_security_manager_is_used begin java_import 'sun.misc.Unsafe' flunk rescue SecurityError end end def test_no_unsafe_version_of_chmv8_is_used require 'thread_safe/jruby_cache_backend' # make sure the jar has been loaded assert !Java::OrgJrubyExtThread_safe::JRubyCacheBackendLibrary::JRubyCacheBackend::CAN_USE_UNSAFE_CHM end end end module ThreadSafe module Test class Latch def initialize(count = 1) @count = count @mutex = Mutex.new @cond = ConditionVariable.new end def release @mutex.synchronize do @count -= 1 if @count > 0 @cond.broadcast if @count.zero? end end def await @mutex.synchronize do @cond.wait @mutex if @count > 0 end end end class Barrier < Latch def await @mutex.synchronize do if @count.zero? # fall through elsif @count > 0 @count -= 1 @count.zero? ? @cond.broadcast : @cond.wait(@mutex) end end end end class HashCollisionKey attr_reader :hash, :key def initialize(key, hash = key.hash % 3) @key = key @hash = hash end def eql?(other) other.kind_of?(self.class) && @key.eql?(other.key) end def even? @key.even? end def <=>(other) @key <=> other.key end end # having 4 separate HCK classes helps for a more thorough CHMV8 testing class HashCollisionKey2 < HashCollisionKey; end class HashCollisionKeyNoCompare < HashCollisionKey def <=>(other) 0 end end class HashCollisionKey4 < HashCollisionKeyNoCompare; end HASH_COLLISION_CLASSES = [HashCollisionKey, HashCollisionKey2, HashCollisionKeyNoCompare, HashCollisionKey4] def self.HashCollisionKey(key, hash = key.hash % 3) HASH_COLLISION_CLASSES[rand(4)].new(key, hash) end class HashCollisionKeyNonComparable < HashCollisionKey undef <=> end end end thread_safe-0.3.5/test/test_array.rb0000644000004100000410000000057312530443654017510 0ustar www-datawww-datarequire 'thread_safe' require File.join(File.dirname(__FILE__), "test_helper") class TestArray < Minitest::Test def test_concurrency ary = ThreadSafe::Array.new (1..THREADS).map do |i| Thread.new do 1000.times do ary << i ary.each {|x| x * 2} ary.shift ary.last end end end.map(&:join) end end thread_safe-0.3.5/test/test_synchronized_delegator.rb0000644000004100000410000000332612530443654023136 0ustar www-datawww-datarequire 'thread_safe/synchronized_delegator.rb' require File.join(File.dirname(__FILE__), "test_helper") class TestSynchronizedDelegator < Minitest::Test def test_wraps_array sync_array = SynchronizedDelegator.new(array = []) array << 1 assert_equal 1, sync_array[0] sync_array << 2 assert_equal 2, array[1] end def test_synchronizes_access t1_continue, t2_continue = false, false hash = Hash.new do |hash, key| t2_continue = true unless hash.find { |e| e[1] == key.to_s } # just to do something hash[key] = key.to_s Thread.pass until t1_continue end end sync_hash = SynchronizedDelegator.new(hash) sync_hash[1] = 'egy' t1 = Thread.new do sync_hash[2] = 'dva' sync_hash[3] # triggers t2_continue end t2 = Thread.new do Thread.pass until t2_continue sync_hash[4] = '42' end sleep(0.05) # sleep some to allow threads to boot until t2.status == 'sleep' do Thread.pass end assert_equal 3, hash.keys.size t1_continue = true t1.join; t2.join assert_equal 4, sync_hash.size end def 
test_synchronizes_access_with_block t1_continue, t2_continue = false, false sync_array = SynchronizedDelegator.new(array = []) t1 = Thread.new do sync_array << 1 sync_array.each do t2_continue = true Thread.pass until t1_continue end end t2 = Thread.new do # sleep(0.01) Thread.pass until t2_continue sync_array << 2 end until t2.status == 'sleep' || t2.status == false do Thread.pass end assert_equal 1, array.size t1_continue = true t1.join; t2.join assert_equal [1, 2], array end end thread_safe-0.3.5/test/test_cache_loops.rb0000644000004100000410000003425512530443654020655 0ustar www-datawww-datarequire 'thread' require 'thread_safe' require File.join(File.dirname(__FILE__), "test_helper") Thread.abort_on_exception = true class TestCacheTorture < Minitest::Test # this is not run unless RUBY_VERSION =~ /1\.8/ || ENV['TRAVIS'] (see the end of the file) THREAD_COUNT = 40 KEY_COUNT = (((2**13) - 2) * 0.75).to_i # get close to the doubling cliff LOW_KEY_COUNT = (((2**8 ) - 2) * 0.75).to_i # get close to the doubling cliff INITIAL_VALUE_CACHE_SETUP = lambda do |options, keys| cache = ThreadSafe::Cache.new initial_value = options[:initial_value] || 0 keys.each {|key| cache[key] = initial_value} cache end ZERO_VALUE_CACHE_SETUP = lambda do |options, keys| INITIAL_VALUE_CACHE_SETUP.call(options.merge(:initial_value => 0), keys) end DEFAULTS = { :key_count => KEY_COUNT, :thread_count => THREAD_COUNT, :loop_count => 1, :prelude => '', :cache_setup => lambda {|options, keys| ThreadSafe::Cache.new} } LOW_KEY_COUNT_OPTIONS = {:loop_count => 150, :key_count => LOW_KEY_COUNT} SINGLE_KEY_COUNT_OPTIONS = {:loop_count => 100_000, :key_count => 1} def test_concurrency code = <<-RUBY_EVAL cache[key] cache[key] = key cache[key] cache.delete(key) RUBY_EVAL do_thread_loop(__method__, code) end def test_put_if_absent do_thread_loop(__method__, 'acc += 1 unless cache.put_if_absent(key, key)', :key_count => 100_000) do |result, cache, options, keys| assert_standard_accumulator_test_result(result, cache, options, keys) end end def test_compute_if_absent code = 'cache.compute_if_absent(key) { acc += 1; key }' do_thread_loop(__method__, code) do |result, cache, options, keys| assert_standard_accumulator_test_result(result, cache, options, keys) end end def test_compute_put_if_absent code = <<-RUBY_EVAL if key.even? 
cache.compute_if_absent(key) { acc += 1; key } else acc += 1 unless cache.put_if_absent(key, key) end RUBY_EVAL do_thread_loop(__method__, code) do |result, cache, options, keys| assert_standard_accumulator_test_result(result, cache, options, keys) end end def test_compute_if_absent_and_present compute_if_absent_and_present compute_if_absent_and_present(LOW_KEY_COUNT_OPTIONS) compute_if_absent_and_present(SINGLE_KEY_COUNT_OPTIONS) end def test_add_remove_to_zero add_remove_to_zero add_remove_to_zero(LOW_KEY_COUNT_OPTIONS) add_remove_to_zero(SINGLE_KEY_COUNT_OPTIONS) end def test_add_remove_to_zero_via_merge_pair add_remove_to_zero_via_merge_pair add_remove_to_zero_via_merge_pair(LOW_KEY_COUNT_OPTIONS) add_remove_to_zero_via_merge_pair(SINGLE_KEY_COUNT_OPTIONS) end def test_add_remove add_remove add_remove(LOW_KEY_COUNT_OPTIONS) add_remove(SINGLE_KEY_COUNT_OPTIONS) end def test_add_remove_via_compute add_remove_via_compute add_remove_via_compute(LOW_KEY_COUNT_OPTIONS) add_remove_via_compute(SINGLE_KEY_COUNT_OPTIONS) end def test_add_remove_via_compute_if_absent_present add_remove_via_compute_if_absent_present add_remove_via_compute_if_absent_present(LOW_KEY_COUNT_OPTIONS) add_remove_via_compute_if_absent_present(SINGLE_KEY_COUNT_OPTIONS) end def test_add_remove_indiscriminate add_remove_indiscriminate add_remove_indiscriminate(LOW_KEY_COUNT_OPTIONS) add_remove_indiscriminate(SINGLE_KEY_COUNT_OPTIONS) end def test_count_up count_up count_up(LOW_KEY_COUNT_OPTIONS) count_up(SINGLE_KEY_COUNT_OPTIONS) end def test_count_up_via_compute count_up_via_compute count_up_via_compute(LOW_KEY_COUNT_OPTIONS) count_up_via_compute(SINGLE_KEY_COUNT_OPTIONS) end def test_count_up_via_merge_pair count_up_via_merge_pair count_up_via_merge_pair(LOW_KEY_COUNT_OPTIONS) count_up_via_merge_pair(SINGLE_KEY_COUNT_OPTIONS) end def test_count_race prelude = 'change = (rand(2) == 1) ? 
1 : -1' code = <<-RUBY_EVAL v = cache[key] acc += change if cache.replace_pair(key, v, v + change) RUBY_EVAL do_thread_loop(__method__, code, :loop_count => 5, :prelude => prelude, :cache_setup => ZERO_VALUE_CACHE_SETUP) do |result, cache, options, keys| result_sum = sum(result) assert_equal(sum(keys.map {|key| cache[key]}), result_sum) assert_equal(sum(cache.values), result_sum) assert_equal(options[:key_count], cache.size) end end def test_get_and_set_new code = 'acc += 1 unless cache.get_and_set(key, key)' do_thread_loop(__method__, code) do |result, cache, options, keys| assert_standard_accumulator_test_result(result, cache, options, keys) end end def test_get_and_set_existing code = 'acc += 1 if cache.get_and_set(key, key) == -1' do_thread_loop(__method__, code, :cache_setup => INITIAL_VALUE_CACHE_SETUP, :initial_value => -1) do |result, cache, options, keys| assert_standard_accumulator_test_result(result, cache, options, keys) end end private def compute_if_absent_and_present(opts = {}) prelude = 'on_present = rand(2) == 1' code = <<-RUBY_EVAL if on_present cache.compute_if_present(key) {|old_value| acc += 1; old_value + 1} else cache.compute_if_absent(key) { acc += 1; 1 } end RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5, :prelude => prelude}.merge(opts)) do |result, cache, options, keys| stored_sum = 0 stored_key_count = 0 keys.each do |k| if value = cache[k] stored_sum += value stored_key_count += 1 end end assert_equal(stored_sum, sum(result)) assert_equal(stored_key_count, cache.size) end end def add_remove(opts = {}) prelude = 'do_add = rand(2) == 1' code = <<-RUBY_EVAL if do_add acc += 1 unless cache.put_if_absent(key, key) else acc -= 1 if cache.delete_pair(key, key) end RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5, :prelude => prelude}.merge(opts)) do |result, cache, options, keys| assert_all_key_mappings_exist(cache, keys, false) assert_equal(cache.size, sum(result)) end end def add_remove_via_compute(opts = {}) prelude = 'do_add = rand(2) == 1' code = <<-RUBY_EVAL cache.compute(key) do |old_value| if do_add acc += 1 unless old_value key else acc -= 1 if old_value nil end end RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5, :prelude => prelude}.merge(opts)) do |result, cache, options, keys| assert_all_key_mappings_exist(cache, keys, false) assert_equal(cache.size, sum(result)) end end def add_remove_via_compute_if_absent_present(opts = {}) prelude = 'do_add = rand(2) == 1' code = <<-RUBY_EVAL if do_add cache.compute_if_absent(key) { acc += 1; key } else cache.compute_if_present(key) { acc -= 1; nil } end RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5, :prelude => prelude}.merge(opts)) do |result, cache, options, keys| assert_all_key_mappings_exist(cache, keys, false) assert_equal(cache.size, sum(result)) end end def add_remove_indiscriminate(opts = {}) prelude = 'do_add = rand(2) == 1' code = <<-RUBY_EVAL if do_add acc += 1 unless cache.put_if_absent(key, key) else acc -= 1 if cache.delete(key) end RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5, :prelude => prelude}.merge(opts)) do |result, cache, options, keys| assert_all_key_mappings_exist(cache, keys, false) assert_equal(cache.size, sum(result)) end end def count_up(opts = {}) code = <<-RUBY_EVAL v = cache[key] acc += 1 if cache.replace_pair(key, v, v + 1) RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5, :cache_setup => ZERO_VALUE_CACHE_SETUP}.merge(opts)) do |result, cache, options, keys| assert_count_up(result, cache, options, keys) 
end end def count_up_via_compute(opts = {}) code = <<-RUBY_EVAL cache.compute(key) do |old_value| acc += 1 old_value ? old_value + 1 : 1 end RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5}.merge(opts)) do |result, cache, options, keys| assert_count_up(result, cache, options, keys) result.inject(nil) do |previous_value, next_value| # since compute guarantees atomicity all count ups should be equal assert_equal previous_value, next_value if previous_value next_value end end end def count_up_via_merge_pair(opts = {}) code = <<-RUBY_EVAL cache.merge_pair(key, 1) {|old_value| old_value + 1} RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5}.merge(opts)) do |result, cache, options, keys| all_match = true expected_value = options[:loop_count] * options[:thread_count] keys.each do |key| if expected_value != (value = cache[key]) all_match = false break end end assert all_match end end def add_remove_to_zero(opts = {}) code = <<-RUBY_EVAL acc += 1 unless cache.put_if_absent(key, key) acc -= 1 if cache.delete_pair(key, key) RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5}.merge(opts)) do |result, cache, options, keys| assert_all_key_mappings_exist(cache, keys, false) assert_equal(cache.size, sum(result)) end end def add_remove_to_zero_via_merge_pair(opts = {}) code = <<-RUBY_EVAL acc += (cache.merge_pair(key, key) {}) ? 1 : -1 RUBY_EVAL do_thread_loop(__method__, code, {:loop_count => 5}.merge(opts)) do |result, cache, options, keys| assert_all_key_mappings_exist(cache, keys, false) assert_equal(cache.size, sum(result)) end end def do_thread_loop(name, code, options = {}, &block) options = DEFAULTS.merge(options) meth = define_loop name, code, options[:prelude] keys = to_keys_array(options[:key_count]) run_thread_loop(meth, keys, options, &block) if options[:key_count] > 1 options[:key_count] = (options[:key_count] / 40).to_i keys = to_hash_collision_keys_array(options[:key_count]) run_thread_loop(meth, keys, options.merge(:loop_count => (options[:loop_count] * 5)), &block) end end def run_thread_loop(meth, keys, options) cache = options[:cache_setup].call(options, keys) barrier = ThreadSafe::Test::Barrier.new(options[:thread_count]) result = (1..options[:thread_count]).map do Thread.new do setup_sync_and_start_loop(meth, cache, keys, barrier, options[:loop_count]) end end.map(&:value) yield result, cache, options, keys if block_given? 
end def setup_sync_and_start_loop(meth, cache, keys, barrier, loop_count) my_keys = keys.shuffle barrier.await if my_keys.size == 1 key = my_keys.first send("#{meth}_single_key", cache, key, loop_count) else send("#{meth}_multiple_keys", cache, my_keys, loop_count) end end def define_loop(name, body, prelude) inner_meth_name = :"_#{name}_loop_inner" outer_meth_name = :"_#{name}_loop_outer" # looping is split into the "loop methods" to trigger the JIT self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{inner_meth_name}_multiple_keys(cache, keys, i, length, acc) #{prelude} target = i + length while i < target key = keys[i] #{body} i += 1 end acc end unless method_defined?(:#{inner_meth_name}_multiple_keys) RUBY_EVAL self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{inner_meth_name}_single_key(cache, key, i, length, acc) #{prelude} target = i + length while i < target #{body} i += 1 end acc end unless method_defined?(:#{inner_meth_name}_single_key) RUBY_EVAL self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{outer_meth_name}_multiple_keys(cache, keys, loop_count) total_length = keys.size acc = 0 inc = 100 loop_count.times do i = 0 pre_loop_inc = total_length % inc acc = #{inner_meth_name}_multiple_keys(cache, keys, i, pre_loop_inc, acc) i += pre_loop_inc while i < total_length acc = #{inner_meth_name}_multiple_keys(cache, keys, i, inc, acc) i += inc end end acc end unless method_defined?(:#{outer_meth_name}_multiple_keys) RUBY_EVAL self.class.class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 def #{outer_meth_name}_single_key(cache, key, loop_count) acc = 0 i = 0 inc = 100 pre_loop_inc = loop_count % inc acc = #{inner_meth_name}_single_key(cache, key, i, pre_loop_inc, acc) i += pre_loop_inc while i < loop_count acc = #{inner_meth_name}_single_key(cache, key, i, inc, acc) i += inc end acc end unless method_defined?(:#{outer_meth_name}_single_key) RUBY_EVAL outer_meth_name end def to_keys_array(key_count) arr = [] key_count.times {|i| arr << i} arr end def to_hash_collision_keys_array(key_count) to_keys_array(key_count).map {|key| ThreadSafe::Test::HashCollisionKey(key)} end def sum(result) result.inject(0) {|acc, i| acc + i} end def assert_standard_accumulator_test_result(result, cache, options, keys) assert_all_key_mappings_exist(cache, keys) assert_equal(options[:key_count], sum(result)) assert_equal(options[:key_count], cache.size) end def assert_all_key_mappings_exist(cache, keys, all_must_exist = true) keys.each do |key| if (value = cache[key]) || all_must_exist assert_equal key, value unless key == value # don't do a bazillion assertions unless necessary end end end def assert_count_up(result, cache, options, keys) keys.each do |key| unless value = cache[key] assert value end end assert_equal(sum(cache.values), sum(result)) assert_equal(options[:key_count], cache.size) end end unless RUBY_VERSION =~ /1\.8/ || ENV['TRAVIS'] thread_safe-0.3.5/.yardopts0000644000004100000410000000027612530443654015675 0ustar www-datawww-data--protected --no-private --embed-mixins --output-dir ./yardoc --markup markdown --title=Concurrent Ruby --template default --template-path ./yard-template ./lib/**/*.rb - README.md LICENSE thread_safe-0.3.5/tasks/0000755000004100000410000000000012530443654015147 5ustar www-datawww-datathread_safe-0.3.5/tasks/update_doc.rake0000644000004100000410000000223612530443654020125 0ustar www-datawww-datarequire 'yard' YARD::Rake::YardocTask.new root = File.expand_path File.join(File.dirname(__FILE__), '..') namespace :yard do cmd = lambda 
do |command| puts ">> executing: #{command}" system command or raise "#{command} failed" end desc 'Pushes generated documentation to github pages: http://ruby-concurrency.github.io/thread_safe/' task :push => [:setup, :yard] do message = Dir.chdir(root) do `git log -n 1 --oneline`.strip end puts "Generating commit: #{message}" Dir.chdir "#{root}/yardoc" do cmd.call "git add -A" cmd.call "git commit -m '#{message}'" cmd.call 'git push origin gh-pages' end end desc 'Setups second clone in ./yardoc dir for pushing doc to github' task :setup do unless File.exist? "#{root}/yardoc/.git" cmd.call "rm -rf #{root}/yardoc" if File.exist?("#{root}/yardoc") Dir.chdir "#{root}" do cmd.call 'git clone --single-branch --branch gh-pages git@github.com:ruby-concurrency/thread_safe.git ./yardoc' end end Dir.chdir "#{root}/yardoc" do cmd.call 'git fetch origin' cmd.call 'git reset --hard origin/gh-pages' end end end thread_safe-0.3.5/LICENSE0000644000004100000410000002201612530443654015030 0ustar www-datawww-dataApache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS thread_safe-0.3.5/ext/0000755000004100000410000000000012530443654014622 5ustar www-datawww-datathread_safe-0.3.5/ext/thread_safe/0000755000004100000410000000000012530443654017067 5ustar www-datawww-datathread_safe-0.3.5/ext/thread_safe/JrubyCacheBackendService.java0000644000004100000410000000075712530443654024553 0ustar www-datawww-datapackage thread_safe; import java.io.IOException; import org.jruby.Ruby; import org.jruby.ext.thread_safe.JRubyCacheBackendLibrary; import org.jruby.runtime.load.BasicLibraryService; // can't name this JRubyCacheBackendService or else JRuby doesn't pick this up public class JrubyCacheBackendService implements BasicLibraryService { public boolean basicLoad(final Ruby runtime) throws IOException { new JRubyCacheBackendLibrary().load(runtime, false); return true; } } thread_safe-0.3.5/ext/org/0000755000004100000410000000000012530443654015411 5ustar www-datawww-datathread_safe-0.3.5/ext/org/jruby/0000755000004100000410000000000012530443654016544 5ustar www-datawww-datathread_safe-0.3.5/ext/org/jruby/ext/0000755000004100000410000000000012530443654017344 5ustar www-datawww-datathread_safe-0.3.5/ext/org/jruby/ext/thread_safe/0000755000004100000410000000000012530443654021611 5ustar www-datawww-datathread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166y/0000755000004100000410000000000012530443654023035 5ustar www-datawww-datathread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166y/ThreadLocalRandom.java0000644000004100000410000001525612530443654027234 0ustar www-datawww-data/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ // This is based on 1.16 version package org.jruby.ext.thread_safe.jsr166y; import java.util.Random; /** * A random number generator isolated to the current thread. Like the * global {@link java.util.Random} generator used by the {@link * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized * with an internally generated seed that may not otherwise be * modified. When applicable, use of {@code ThreadLocalRandom} rather * than shared {@code Random} objects in concurrent programs will * typically encounter much less overhead and contention. Use of * {@code ThreadLocalRandom} is particularly appropriate when multiple * tasks (for example, each a {@link ForkJoinTask}) use random numbers * in parallel in thread pools. * *
 * <p>Usages of this class should typically be of the form:
 * {@code ThreadLocalRandom.current().nextX(...)} (where
 * {@code X} is {@code Int}, {@code Long}, etc).
 * When all usages are of this form, it is never possible to
 * accidentally share a {@code ThreadLocalRandom} across multiple threads.
 *
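 * <p>For instance, a minimal sketch using the bounded methods defined
 * below (variable names are illustrative only):
 *
 * <pre> {@code
 * // each call draws from the calling thread's own generator;
 * // the generator instance itself should never be cached or shared
 * int die  = ThreadLocalRandom.current().nextInt(1, 7);    // 1..6
 * double x = ThreadLocalRandom.current().nextDouble(0.0, 1.0);
 * }</pre>
 *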
 * <p>
This class also provides additional commonly used bounded random * generation methods. * * @since 1.7 * @author Doug Lea */ public class ThreadLocalRandom extends Random { // same constants as Random, but must be redeclared because private private static final long multiplier = 0x5DEECE66DL; private static final long addend = 0xBL; private static final long mask = (1L << 48) - 1; /** * The random seed. We can't use super.seed. */ private long rnd; /** * Initialization flag to permit calls to setSeed to succeed only * while executing the Random constructor. We can't allow others * since it would cause setting seed in one part of a program to * unintentionally impact other usages by the thread. */ boolean initialized; // Padding to help avoid memory contention among seed updates in // different TLRs in the common case that they are located near // each other. private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; /** * The actual ThreadLocal */ private static final ThreadLocal localRandom = new ThreadLocal() { protected ThreadLocalRandom initialValue() { return new ThreadLocalRandom(); } }; /** * Constructor called only by localRandom.initialValue. */ ThreadLocalRandom() { super(); initialized = true; } /** * Returns the current thread's {@code ThreadLocalRandom}. * * @return the current thread's {@code ThreadLocalRandom} */ public static ThreadLocalRandom current() { return localRandom.get(); } /** * Throws {@code UnsupportedOperationException}. Setting seeds in * this generator is not supported. * * @throws UnsupportedOperationException always */ public void setSeed(long seed) { if (initialized) throw new UnsupportedOperationException(); rnd = (seed ^ multiplier) & mask; } protected int next(int bits) { rnd = (rnd * multiplier + addend) & mask; return (int) (rnd >>> (48-bits)); } /** * Returns a pseudorandom, uniformly distributed value between the * given least value (inclusive) and bound (exclusive). * * @param least the least value returned * @param bound the upper bound (exclusive) * @throws IllegalArgumentException if least greater than or equal * to bound * @return the next value */ public int nextInt(int least, int bound) { if (least >= bound) throw new IllegalArgumentException(); return nextInt(bound - least) + least; } /** * Returns a pseudorandom, uniformly distributed value * between 0 (inclusive) and the specified value (exclusive). * * @param n the bound on the random number to be returned. Must be * positive. * @return the next value * @throws IllegalArgumentException if n is not positive */ public long nextLong(long n) { if (n <= 0) throw new IllegalArgumentException("n must be positive"); // Divide n by two until small enough for nextInt. On each // iteration (at most 31 of them but usually much less), // randomly choose both whether to include high bit in result // (offset) and whether to continue with the lower vs upper // half (which makes a difference only if odd). long offset = 0; while (n >= Integer.MAX_VALUE) { int bits = next(2); long half = n >>> 1; long nextn = ((bits & 2) == 0) ? half : n - half; if ((bits & 1) == 0) offset += n - nextn; n = nextn; } return offset + nextInt((int) n); } /** * Returns a pseudorandom, uniformly distributed value between the * given least value (inclusive) and bound (exclusive). 
* * @param least the least value returned * @param bound the upper bound (exclusive) * @return the next value * @throws IllegalArgumentException if least greater than or equal * to bound */ public long nextLong(long least, long bound) { if (least >= bound) throw new IllegalArgumentException(); return nextLong(bound - least) + least; } /** * Returns a pseudorandom, uniformly distributed {@code double} value * between 0 (inclusive) and the specified value (exclusive). * * @param n the bound on the random number to be returned. Must be * positive. * @return the next value * @throws IllegalArgumentException if n is not positive */ public double nextDouble(double n) { if (n <= 0) throw new IllegalArgumentException("n must be positive"); return nextDouble() * n; } /** * Returns a pseudorandom, uniformly distributed value between the * given least value (inclusive) and bound (exclusive). * * @param least the least value returned * @param bound the upper bound (exclusive) * @return the next value * @throws IllegalArgumentException if least greater than or equal * to bound */ public double nextDouble(double least, double bound) { if (least >= bound) throw new IllegalArgumentException(); return nextDouble() * (bound - least) + least; } private static final long serialVersionUID = -5851777807851030925L; } thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/0000755000004100000410000000000012530443654023011 5ustar www-datawww-datathread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/0000755000004100000410000000000012530443654024627 5ustar www-datawww-datathread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/Striped64.java0000644000004100000410000002666012530443654027270 0ustar www-datawww-data/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ // This is based on 1.5 version. package org.jruby.ext.thread_safe.jsr166e.nounsafe; import java.util.Random; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicLongFieldUpdater; /** * A package-local class holding common representation and mechanics * for classes supporting dynamic striping on 64bit values. The class * extends Number so that concrete subclasses must publicly do so. */ abstract class Striped64 extends Number { /* * This class maintains a lazily-initialized table of atomically * updated variables, plus an extra "base" field. The table size * is a power of two. Indexing uses masked per-thread hash codes. * Nearly all declarations in this class are package-private, * accessed directly by subclasses. * * Table entries are of class Cell; a variant of AtomicLong padded * to reduce cache contention on most processors. Padding is * overkill for most Atomics because they are usually irregularly * scattered in memory and thus don't interfere much with each * other. But Atomic objects residing in arrays will tend to be * placed adjacent to each other, and so will most often share * cache lines (with a huge negative performance impact) without * this precaution. * * In part because Cells are relatively large, we avoid creating * them until they are needed. When there is no contention, all * updates are made to the base field. Upon first contention (a * failed CAS on base update), the table is initialized to size 2. 
* The table size is doubled upon further contention until * reaching the nearest power of two greater than or equal to the * number of CPUS. Table slots remain empty (null) until they are * needed. * * A single spinlock ("busy") is used for initializing and * resizing the table, as well as populating slots with new Cells. * There is no need for a blocking lock: When the lock is not * available, threads try other slots (or the base). During these * retries, there is increased contention and reduced locality, * which is still better than alternatives. * * Per-thread hash codes are initialized to random values. * Contention and/or table collisions are indicated by failed * CASes when performing an update operation (see method * retryUpdate). Upon a collision, if the table size is less than * the capacity, it is doubled in size unless some other thread * holds the lock. If a hashed slot is empty, and lock is * available, a new Cell is created. Otherwise, if the slot * exists, a CAS is tried. Retries proceed by "double hashing", * using a secondary hash (Marsaglia XorShift) to try to find a * free slot. * * The table size is capped because, when there are more threads * than CPUs, supposing that each thread were bound to a CPU, * there would exist a perfect hash function mapping threads to * slots that eliminates collisions. When we reach capacity, we * search for this mapping by randomly varying the hash codes of * colliding threads. Because search is random, and collisions * only become known via CAS failures, convergence can be slow, * and because threads are typically not bound to CPUS forever, * may not occur at all. However, despite these limitations, * observed contention rates are typically low in these cases. * * It is possible for a Cell to become unused when threads that * once hashed to it terminate, as well as in the case where * doubling the table causes no thread to hash to it under * expanded mask. We do not try to detect or remove such cells, * under the assumption that for long-running instances, observed * contention levels will recur, so the cells will eventually be * needed again; and for short-lived ones, it does not matter. */ /** * Padded variant of AtomicLong supporting only raw accesses plus CAS. * The value field is placed between pads, hoping that the JVM doesn't * reorder them. * * JVM intrinsics note: It would be possible to use a release-only * form of CAS here, if it were provided. */ static final class Cell { volatile long p0, p1, p2, p3, p4, p5, p6; volatile long value; volatile long q0, q1, q2, q3, q4, q5, q6; static AtomicLongFieldUpdater VALUE_UPDATER = AtomicLongFieldUpdater.newUpdater(Cell.class, "value"); Cell(long x) { value = x; } final boolean cas(long cmp, long val) { return VALUE_UPDATER.compareAndSet(this, cmp, val); } } /** * Holder for the thread-local hash code. The code is initially * random, but may be set to a different value upon collisions. */ static final class HashCode { static final Random rng = new Random(); int code; HashCode() { int h = rng.nextInt(); // Avoid zero to allow xorShift rehash code = (h == 0) ? 1 : h; } } /** * The corresponding ThreadLocal class */ static final class ThreadHashCode extends ThreadLocal { public HashCode initialValue() { return new HashCode(); } } /** * Static per-thread hash codes. Shared across all instances to * reduce ThreadLocal pollution and because adjustments due to * collisions in one table are likely to be appropriate for * others. 
*/ static final ThreadHashCode threadHashCode = new ThreadHashCode(); /** Number of CPUS, to place bound on table size */ static final int NCPU = Runtime.getRuntime().availableProcessors(); /** * Table of cells. When non-null, size is a power of 2. */ transient volatile Cell[] cells; /** * Base value, used mainly when there is no contention, but also as * a fallback during table initialization races. Updated via CAS. */ transient volatile long base; /** * Spinlock (locked via CAS) used when resizing and/or creating Cells. */ transient volatile int busy; AtomicLongFieldUpdater BASE_UPDATER = AtomicLongFieldUpdater.newUpdater(Striped64.class, "base"); AtomicIntegerFieldUpdater BUSY_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Striped64.class, "busy"); /** * Package-private default constructor */ Striped64() { } /** * CASes the base field. */ final boolean casBase(long cmp, long val) { return BASE_UPDATER.compareAndSet(this, cmp, val); } /** * CASes the busy field from 0 to 1 to acquire lock. */ final boolean casBusy() { return BUSY_UPDATER.compareAndSet(this, 0, 1); } /** * Computes the function of current and new value. Subclasses * should open-code this update function for most uses, but the * virtualized form is needed within retryUpdate. * * @param currentValue the current value (of either base or a cell) * @param newValue the argument from a user update call * @return result of the update function */ abstract long fn(long currentValue, long newValue); /** * Handles cases of updates involving initialization, resizing, * creating new Cells, and/or contention. See above for * explanation. This method suffers the usual non-modularity * problems of optimistic retry code, relying on rechecked sets of * reads. * * @param x the value * @param hc the hash code holder * @param wasUncontended false if CAS failed before call */ final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { int h = hc.code; boolean collide = false; // True if last slot nonempty for (;;) { Cell[] as; Cell a; int n; long v; if ((as = cells) != null && (n = as.length) > 0) { if ((a = as[(n - 1) & h]) == null) { if (busy == 0) { // Try to attach new Cell Cell r = new Cell(x); // Optimistically create if (busy == 0 && casBusy()) { boolean created = false; try { // Recheck under lock Cell[] rs; int m, j; if ((rs = cells) != null && (m = rs.length) > 0 && rs[j = (m - 1) & h] == null) { rs[j] = r; created = true; } } finally { busy = 0; } if (created) break; continue; // Slot is now non-empty } } collide = false; } else if (!wasUncontended) // CAS already known to fail wasUncontended = true; // Continue after rehash else if (a.cas(v = a.value, fn(v, x))) break; else if (n >= NCPU || cells != as) collide = false; // At max size or stale else if (!collide) collide = true; else if (busy == 0 && casBusy()) { try { if (cells == as) { // Expand table unless stale Cell[] rs = new Cell[n << 1]; for (int i = 0; i < n; ++i) rs[i] = as[i]; cells = rs; } } finally { busy = 0; } collide = false; continue; // Retry with expanded table } h ^= h << 13; // Rehash h ^= h >>> 17; h ^= h << 5; } else if (busy == 0 && cells == as && casBusy()) { boolean init = false; try { // Initialize table if (cells == as) { Cell[] rs = new Cell[2]; rs[h & 1] = new Cell(x); cells = rs; init = true; } } finally { busy = 0; } if (init) break; } else if (casBase(v = base, fn(v, x))) break; // Fall back on using base } hc.code = h; // Record index for next time } /** * Sets base and all cells to the given value. 
*/ final void internalReset(long initialValue) { Cell[] as = cells; base = initialValue; if (as != null) { int n = as.length; for (int i = 0; i < n; ++i) { Cell a = as[i]; if (a != null) a.value = initialValue; } } } }thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/LongAdder.java0000644000004100000410000001336312530443654027337 0ustar www-datawww-data/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ // This is based on 1.9 version. package org.jruby.ext.thread_safe.jsr166e.nounsafe; import java.util.concurrent.atomic.AtomicLong; import java.io.IOException; import java.io.Serializable; import java.io.ObjectInputStream; /** * One or more variables that together maintain an initially zero * {@code long} sum. When updates (method {@link #add}) are contended * across threads, the set of variables may grow dynamically to reduce * contention. Method {@link #sum} (or, equivalently, {@link * #longValue}) returns the current total combined across the * variables maintaining the sum. * *
 * <p>
This class is usually preferable to {@link AtomicLong} when * multiple threads update a common sum that is used for purposes such * as collecting statistics, not for fine-grained synchronization * control. Under low update contention, the two classes have similar * characteristics. But under high contention, expected throughput of * this class is significantly higher, at the expense of higher space * consumption. * *
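 * <p>A minimal usage sketch (all methods shown are defined below): a
 * shared sum bumped by many threads and read occasionally:
 *
 * <pre> {@code
 * final LongAdder requests = new LongAdder();
 * // in any number of worker threads:
 * requests.increment();
 * // in a reporting thread; sum() is a snapshot, not an atomic read:
 * long total = requests.sum();
 * }</pre>
 *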
 * <p>
This class extends {@link Number}, but does not define * methods such as {@code hashCode} and {@code compareTo} because * instances are expected to be mutated, and so are not useful as * collection keys. * *
 * <p>
jsr166e note: This class is targeted to be placed in * java.util.concurrent.atomic. * * @since 1.8 * @author Doug Lea */ public class LongAdder extends Striped64 implements Serializable { private static final long serialVersionUID = 7249069246863182397L; /** * Version of plus for use in retryUpdate */ final long fn(long v, long x) { return v + x; } /** * Creates a new adder with initial sum of zero. */ public LongAdder() { } /** * Adds the given value. * * @param x the value to add */ public void add(long x) { Cell[] as; long b, v; HashCode hc; Cell a; int n; if ((as = cells) != null || !casBase(b = base, b + x)) { boolean uncontended = true; int h = (hc = threadHashCode.get()).code; if (as == null || (n = as.length) < 1 || (a = as[(n - 1) & h]) == null || !(uncontended = a.cas(v = a.value, v + x))) retryUpdate(x, hc, uncontended); } } /** * Equivalent to {@code add(1)}. */ public void increment() { add(1L); } /** * Equivalent to {@code add(-1)}. */ public void decrement() { add(-1L); } /** * Returns the current sum. The returned value is NOT an * atomic snapshot: Invocation in the absence of concurrent * updates returns an accurate result, but concurrent updates that * occur while the sum is being calculated might not be * incorporated. * * @return the sum */ public long sum() { long sum = base; Cell[] as = cells; if (as != null) { int n = as.length; for (int i = 0; i < n; ++i) { Cell a = as[i]; if (a != null) sum += a.value; } } return sum; } /** * Resets variables maintaining the sum to zero. This method may * be a useful alternative to creating a new adder, but is only * effective if there are no concurrent updates. Because this * method is intrinsically racy, it should only be used when it is * known that no threads are concurrently updating. */ public void reset() { internalReset(0L); } /** * Equivalent in effect to {@link #sum} followed by {@link * #reset}. This method may apply for example during quiescent * points between multithreaded computations. If there are * updates concurrent with this method, the returned value is * not guaranteed to be the final value occurring before * the reset. * * @return the sum */ public long sumThenReset() { long sum = base; Cell[] as = cells; base = 0L; if (as != null) { int n = as.length; for (int i = 0; i < n; ++i) { Cell a = as[i]; if (a != null) { sum += a.value; a.value = 0L; } } } return sum; } /** * Returns the String representation of the {@link #sum}. * @return the String representation of the {@link #sum} */ public String toString() { return Long.toString(sum()); } /** * Equivalent to {@link #sum}. * * @return the sum */ public long longValue() { return sum(); } /** * Returns the {@link #sum} as an {@code int} after a narrowing * primitive conversion. */ public int intValue() { return (int)sum(); } /** * Returns the {@link #sum} as a {@code float} * after a widening primitive conversion. */ public float floatValue() { return (float)sum(); } /** * Returns the {@link #sum} as a {@code double} after a widening * primitive conversion. 
*/ public double doubleValue() { return (double)sum(); } private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException { s.defaultWriteObject(); s.writeLong(sum()); } private void readObject(ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); busy = 0; cells = null; base = s.readLong(); } } thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/nounsafe/ConcurrentHashMapV8.java0000644000004100000410000046421612530443654031311 0ustar www-datawww-data/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ // This is based on the 1.79 version. package org.jruby.ext.thread_safe.jsr166e.nounsafe; import org.jruby.RubyClass; import org.jruby.RubyNumeric; import org.jruby.RubyObject; import org.jruby.exceptions.RaiseException; import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap; import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom; import org.jruby.runtime.ThreadContext; import org.jruby.runtime.builtin.IRubyObject; import java.util.Arrays; import java.util.Map; import java.util.Set; import java.util.Collection; import java.util.Hashtable; import java.util.HashMap; import java.util.Iterator; import java.util.Enumeration; import java.util.ConcurrentModificationException; import java.util.NoSuchElementException; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.concurrent.locks.AbstractQueuedSynchronizer; import java.io.Serializable; /** * A hash table supporting full concurrency of retrievals and * high expected concurrency for updates. This class obeys the * same functional specification as {@link java.util.Hashtable}, and * includes versions of methods corresponding to each method of * {@code Hashtable}. However, even though all operations are * thread-safe, retrieval operations do not entail locking, * and there is not any support for locking the entire table * in a way that prevents all access. This class is fully * interoperable with {@code Hashtable} in programs that rely on its * thread safety but not on its synchronization details. * *
 * <p>
Retrieval operations (including {@code get}) generally do not * block, so may overlap with update operations (including {@code put} * and {@code remove}). Retrievals reflect the results of the most * recently completed update operations holding upon their * onset. (More formally, an update operation for a given key bears a * happens-before relation with any (non-null) retrieval for * that key reporting the updated value.) For aggregate operations * such as {@code putAll} and {@code clear}, concurrent retrievals may * reflect insertion or removal of only some entries. Similarly, * Iterators and Enumerations return elements reflecting the state of * the hash table at some point at or since the creation of the * iterator/enumeration. They do not throw {@link * ConcurrentModificationException}. However, iterators are designed * to be used by only one thread at a time. Bear in mind that the * results of aggregate status methods including {@code size}, {@code * isEmpty}, and {@code containsValue} are typically useful only when * a map is not undergoing concurrent updates in other threads. * Otherwise the results of these methods reflect transient states * that may be adequate for monitoring or estimation purposes, but not * for program control. * *
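 * <p>For instance (a minimal sketch, given some
 * {@code ConcurrentHashMapV8<String, Long> map}): iteration may run
 * concurrently with updates and never throws
 * {@code ConcurrentModificationException}:
 *
 * <pre> {@code
 * for (Map.Entry<String, Long> e : map.entrySet())
 *   handle(e.getKey(), e.getValue());   // handle(): any consumer of a pair
 * }</pre>
 *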
 * <p>
The table is dynamically expanded when there are too many * collisions (i.e., keys that have distinct hash codes but fall into * the same slot modulo the table size), with the expected average * effect of maintaining roughly two bins per mapping (corresponding * to a 0.75 load factor threshold for resizing). There may be much * variance around this average as mappings are added and removed, but * overall, this maintains a commonly accepted time/space tradeoff for * hash tables. However, resizing this or any other kind of hash * table may be a relatively slow operation. When possible, it is a * good idea to provide a size estimate as an optional {@code * initialCapacity} constructor argument. An additional optional * {@code loadFactor} constructor argument provides a further means of * customizing initial table capacity by specifying the table density * to be used in calculating the amount of space to allocate for the * given number of elements. Also, for compatibility with previous * versions of this class, constructors may optionally specify an * expected {@code concurrencyLevel} as an additional hint for * internal sizing. Note that using many keys with exactly the same * {@code hashCode()} is a sure way to slow down performance of any * hash table. * *
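 * <p>A minimal sizing sketch, using the {@code initialCapacity}
 * constructor argument described above (key and value types are
 * illustrative):
 *
 * <pre> {@code
 * // expecting on the order of a million mappings: pre-size the table
 * // so it does not resize repeatedly while being filled
 * ConcurrentHashMapV8<String, Long> counts =
 *     new ConcurrentHashMapV8<String, Long>(1000000);
 * }</pre>
 *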
 * <p>
A {@link Set} projection of a ConcurrentHashMapV8 may be created * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed * (using {@link #keySet(Object)} when only keys are of interest, and the * mapped values are (perhaps transiently) not used or all take the * same mapping value. * *
 * <p>A ConcurrentHashMapV8 can be used as a scalable frequency map (a
 * form of histogram or multiset) by using {@link LongAdder} values
 * and initializing via {@link #computeIfAbsent}. For example, to add
 * a count to a {@code ConcurrentHashMapV8<String,LongAdder> freqs}, you
 * can use {@code freqs.computeIfAbsent(k -> new
 * LongAdder()).increment();}
 *
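 * <p>Expanded, that idiom looks like the following minimal sketch
 * (JDK8 lambda shorthand as above; the jsr166e forms take equivalent
 * nested functional interfaces, and {@code word} is any String key):
 *
 * <pre> {@code
 * ConcurrentHashMapV8<String, LongAdder> freqs =
 *     new ConcurrentHashMapV8<String, LongAdder>();
 * // safe under concurrent calls from many threads:
 * freqs.computeIfAbsent(word, k -> new LongAdder()).increment();
 * long count = freqs.get(word).sum();   // moving snapshot
 * }</pre>
 *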
 * <p>
This class and its views and iterators implement all of the * optional methods of the {@link Map} and {@link Iterator} * interfaces. * *
 * <p>
Like {@link Hashtable} but unlike {@link HashMap}, this class * does not allow {@code null} to be used as a key or value. * *
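 * <p>Concretely (a sketch): {@code map.put(key, null)} and
 * {@code map.put(null, value)} both fail, so a {@code null} return
 * from {@code map.get(key)} always means "no mapping present" rather
 * than "mapped to null".
 *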
 * <p>
ConcurrentHashMapV8s support parallel operations using the {@link * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts * are available in class {@link ForkJoinTasks}). These operations are * designed to be safely, and often sensibly, applied even with maps * that are being concurrently updated by other threads; for example, * when computing a snapshot summary of the values in a shared * registry. There are three kinds of operation, each with four * forms, accepting functions with Keys, Values, Entries, and (Key, * Value) arguments and/or return values. (The first three forms are * also available via the {@link #keySet()}, {@link #values()} and * {@link #entrySet()} views). Because the elements of a * ConcurrentHashMapV8 are not ordered in any particular way, and may be * processed in different orders in different parallel executions, the * correctness of supplied functions should not depend on any * ordering, or on any other objects or values that may transiently * change while computation is in progress; and except for forEach * actions, should ideally be side-effect-free. * *
 *
 * <ul>
 * <li>forEach: Perform a given action on each element.
 * A variant form applies a given transformation on each element
 * before performing the action.</li>
 *
 * <li>search: Return the first available non-null result of
 * applying a given function on each element; skipping further
 * search when a result is found.</li>
 *
 * <li>reduce: Accumulate each element. The supplied reduction
 * function cannot rely on ordering (more formally, it should be
 * both associative and commutative). There are five variants:
 *
 * <ul>
 *
 * <li>Plain reductions. (There is not a form of this method for
 * (key, value) function arguments since there is no corresponding
 * return type.)</li>
 *
 * <li>Mapped reductions that accumulate the results of a given
 * function applied to each element.</li>
 *
 * <li>Reductions to scalar doubles, longs, and ints, using a
 * given basis value.</li>
 *
 * </ul>
 * </li>
 * </ul>
 *
 * <p>
The concurrency properties of bulk operations follow * from those of ConcurrentHashMapV8: Any non-null result returned * from {@code get(key)} and related access methods bears a * happens-before relation with the associated insertion or * update. The result of any bulk operation reflects the * composition of these per-element relations (but is not * necessarily atomic with respect to the map as a whole unless it * is somehow known to be quiescent). Conversely, because keys * and values in the map are never null, null serves as a reliable * atomic indicator of the current lack of any result. To * maintain this property, null serves as an implicit basis for * all non-scalar reduction operations. For the double, long, and * int versions, the basis should be one that, when combined with * any other value, returns that other value (more formally, it * should be the identity element for the reduction). Most common * reductions have these properties; for example, computing a sum * with basis 0 or a minimum with basis MAX_VALUE. * *
 * <p>
Search and transformation functions provided as arguments * should similarly return null to indicate the lack of any result * (in which case it is not used). In the case of mapped * reductions, this also enables transformations to serve as * filters, returning null (or, in the case of primitive * specializations, the identity basis) if the element should not * be combined. You can create compound transformations and * filterings by composing them yourself under this "null means * there is nothing there now" rule before using them in search or * reduce operations. * *
 * <p>
Methods accepting and/or returning Entry arguments maintain * key-value associations. They may be useful for example when * finding the key for the greatest value. Note that "plain" Entry * arguments can be supplied using {@code new * AbstractMap.SimpleEntry(k,v)}. * *
 * <p>
Bulk operations may complete abruptly, throwing an * exception encountered in the application of a supplied * function. Bear in mind when handling such exceptions that other * concurrently executing functions could also have thrown * exceptions, or would have done so if the first exception had * not occurred. * *
 * <p>
Parallel speedups for bulk operations compared to sequential * processing are common but not guaranteed. Operations involving * brief functions on small maps may execute more slowly than * sequential loops if the underlying work to parallelize the * computation is more expensive than the computation itself. * Similarly, parallelization may not lead to much actual parallelism * if all processors are busy performing unrelated tasks. * *
 * <p>
All arguments to all task methods must be non-null. * *
 * <p>
jsr166e note: During transition, this class * uses nested functional interfaces with different names but the * same forms as those expected for JDK8. * *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @since 1.5
 * @author Doug Lea
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 */
public class ConcurrentHashMapV8<K, V>
        implements ConcurrentMap<K, V>, Serializable, ConcurrentHashMap<K, V> {
    private static final long serialVersionUID = 7249069246763182397L;

    /**
     * A partitionable iterator. A Spliterator can be traversed
     * directly, but can also be partitioned (before traversal) by
     * creating another Spliterator that covers a non-overlapping
     * portion of the elements, and so may be amenable to parallel
     * execution.
     *
 * <p>
This interface exports a subset of expected JDK8 * functionality. * *
 * <p>
Sample usage: Here is one (of the several) ways to compute * the sum of the values held in a map using the ForkJoin * framework. As illustrated here, Spliterators are well suited to * designs in which a task repeatedly splits off half its work * into forked subtasks until small enough to process directly, * and then joins these subtasks. Variants of this style can also * be used in completion-based designs. * *
 * <pre>
     * {@code ConcurrentHashMapV8<String, Long> m = ...
     * // split as if have 8 * parallelism, for load balance
     * int n = m.size();
     * int p = aForkJoinPool.getParallelism() * 8;
     * int split = (n < p)? n : p;
     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
     * // ...
     * static class SumValues extends RecursiveTask<Long> {
     *   final Spliterator<Long> s;
     *   final int split;             // split while > 1
     *   final SumValues nextJoin;    // records forked subtasks to join
     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
     *   }
     *   public Long compute() {
     *     long sum = 0;
     *     SumValues subtasks = null; // fork subtasks
     *     for (int h = split >>> 1; h > 0; h >>>= 1)
     *       (subtasks = new SumValues(s.split(), h, subtasks)).fork();
     *     while (s.hasNext())        // directly process remaining elements
     *       sum += s.next();
     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
     *       sum += t.join();         // collect subtask results
     *     return sum;
     *   }
     * }
     * }</pre>
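     *
     * <p>Because a Spliterator is also a plain {@code Iterator}, the same
     * sum can be computed sequentially; a minimal sketch reusing {@code m}
     * from the example above:
     *
     * <pre> {@code
     * long sum = 0;
     * Spliterator<Long> it = m.valueSpliterator();
     * while (it.hasNext())   // plain traversal; split() never called
     *   sum += it.next();
     * }</pre>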
*/ public static interface Spliterator extends Iterator { /** * Returns a Spliterator covering approximately half of the * elements, guaranteed not to overlap with those subsequently * returned by this Spliterator. After invoking this method, * the current Spliterator will not produce any of * the elements of the returned Spliterator, but the two * Spliterators together will produce all of the elements that * would have been produced by this Spliterator had this * method not been called. The exact number of elements * produced by the returned Spliterator is not guaranteed, and * may be zero (i.e., with {@code hasNext()} reporting {@code * false}) if this Spliterator cannot be further split. * * @return a Spliterator covering approximately half of the * elements * @throws IllegalStateException if this Spliterator has * already commenced traversing elements */ Spliterator split(); } /* * Overview: * * The primary design goal of this hash table is to maintain * concurrent readability (typically method get(), but also * iterators and related methods) while minimizing update * contention. Secondary goals are to keep space consumption about * the same or better than java.util.HashMap, and to support high * initial insertion rates on an empty table by many threads. * * Each key-value mapping is held in a Node. Because Node fields * can contain special values, they are defined using plain Object * types. Similarly in turn, all internal methods that use them * work off Object types. And similarly, so do the internal * methods of auxiliary iterator and view classes. All public * generic typed methods relay in/out of these internal methods, * supplying null-checks and casts as needed. This also allows * many of the public methods to be factored into a smaller number * of internal methods (although sadly not so for the five * variants of put-related operations). The validation-based * approach explained below leads to a lot of code sprawl because * retry-control precludes factoring into smaller methods. * * The table is lazily initialized to a power-of-two size upon the * first insertion. Each bin in the table normally contains a * list of Nodes (most often, the list has only zero or one Node). * Table accesses require volatile/atomic reads, writes, and * CASes. Because there is no other way to arrange this without * adding further indirections, we use intrinsics * (sun.misc.Unsafe) operations. The lists of nodes within bins * are always accurately traversable under volatile reads, so long * as lookups check hash code and non-nullness of value before * checking key equality. * * We use the top two bits of Node hash fields for control * purposes -- they are available anyway because of addressing * constraints. As explained further below, these top bits are * used as follows: * 00 - Normal * 01 - Locked * 11 - Locked and may have a thread waiting for lock * 10 - Node is a forwarding node * * The lower 30 bits of each Node's hash field contain a * transformation of the key's hash code, except for forwarding * nodes, for which the lower bits are zero (and so always have * hash field == MOVED). * * Insertion (via put or its variants) of the first node in an * empty bin is performed by just CASing it to the bin. This is * by far the most common case for put operations under most * key/hash distributions. Other update operations (insert, * delete, and replace) require locks. 
We do not want to waste * the space required to associate a distinct lock object with * each bin, so instead use the first node of a bin list itself as * a lock. Blocking support for these locks relies on the builtin * "synchronized" monitors. However, we also need a tryLock * construction, so we overlay these by using bits of the Node * hash field for lock control (see above), and so normally use * builtin monitors only for blocking and signalling using * wait/notifyAll constructions. See Node.tryAwaitLock. * * Using the first node of a list as a lock does not by itself * suffice though: When a node is locked, any update must first * validate that it is still the first node after locking it, and * retry if not. Because new nodes are always appended to lists, * once a node is first in a bin, it remains first until deleted * or the bin becomes invalidated (upon resizing). However, * operations that only conditionally update may inspect nodes * until the point of update. This is a converse of sorts to the * lazy locking technique described by Herlihy & Shavit. * * The main disadvantage of per-bin locks is that other update * operations on other nodes in a bin list protected by the same * lock can stall, for example when user equals() or mapping * functions take a long time. However, statistically, under * random hash codes, this is not a common problem. Ideally, the * frequency of nodes in bins follows a Poisson distribution * (http://en.wikipedia.org/wiki/Poisson_distribution) with a * parameter of about 0.5 on average, given the resizing threshold * of 0.75, although with a large variance because of resizing * granularity. Ignoring variance, the expected occurrences of * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The * first values are: * * 0: 0.60653066 * 1: 0.30326533 * 2: 0.07581633 * 3: 0.01263606 * 4: 0.00157952 * 5: 0.00015795 * 6: 0.00001316 * 7: 0.00000094 * 8: 0.00000006 * more: less than 1 in ten million * * Lock contention probability for two threads accessing distinct * elements is roughly 1 / (8 * #elements) under random hashes. * * Actual hash code distributions encountered in practice * sometimes deviate significantly from uniform randomness. This * includes the case when N > (1<<30), so some keys MUST collide. * Similarly for dumb or hostile usages in which multiple keys are * designed to have identical hash codes. Also, although we guard * against the worst effects of this (see method spread), sets of * hashes may differ only in bits that do not impact their bin * index for a given power-of-two mask. So we use a secondary * strategy that applies when the number of nodes in a bin exceeds * a threshold, and at least one of the keys implements * Comparable. These TreeBins use a balanced tree to hold nodes * (a specialized form of red-black trees), bounding search time * to O(log N). Each search step in a TreeBin is around twice as * slow as in a regular list, but given that N cannot exceed * (1<<64) (before running out of addresses) this bounds search * steps, lock hold times, etc, to reasonable constants (roughly * 100 nodes inspected per operation worst case) so long as keys * are Comparable (which is very common -- String, Long, etc). * TreeBin nodes (TreeNodes) also maintain the same "next" * traversal pointers as regular nodes, so can be traversed in * iterators in the same way. * * The table is resized when occupancy exceeds a percentage * threshold (nominally, 0.75, but see below). 
Only a single * thread performs the resize (using field "sizeCtl", to arrange * exclusion), but the table otherwise remains usable for reads * and updates. Resizing proceeds by transferring bins, one by * one, from the table to the next table. Because we are using * power-of-two expansion, the elements from each bin must either * stay at same index, or move with a power of two offset. We * eliminate unnecessary node creation by catching cases where old * nodes can be reused because their next fields won't change. On * average, only about one-sixth of them need cloning when a table * doubles. The nodes they replace will be garbage collectable as * soon as they are no longer referenced by any reader thread that * may be in the midst of concurrently traversing table. Upon * transfer, the old table bin contains only a special forwarding * node (with hash field "MOVED") that contains the next table as * its key. On encountering a forwarding node, access and update * operations restart, using the new table. * * Each bin transfer requires its bin lock. However, unlike other * cases, a transfer can skip a bin if it fails to acquire its * lock, and revisit it later (unless it is a TreeBin). Method * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that * have been skipped because of failure to acquire a lock, and * blocks only if none are available (i.e., only very rarely). * The transfer operation must also ensure that all accessible * bins in both the old and new table are usable by any traversal. * When there are no lock acquisition failures, this is arranged * simply by proceeding from the last bin (table.length - 1) up * towards the first. Upon seeing a forwarding node, traversals * (see class Iter) arrange to move to the new table * without revisiting nodes. However, when any node is skipped * during a transfer, all earlier table bins may have become * visible, so are initialized with a reverse-forwarding node back * to the old table until the new ones are established. (This * sometimes requires transiently locking a forwarding node, which * is possible under the above encoding.) These more expensive * mechanics trigger only when necessary. * * The traversal scheme also applies to partial traversals of * ranges of bins (via an alternate Traverser constructor) * to support partitioned aggregate operations. Also, read-only * operations give up if ever forwarded to a null table, which * provides support for shutdown-style clearing, which is also not * currently implemented. * * Lazy table initialization minimizes footprint until first use, * and also avoids resizings when the first operation is from a * putAll, constructor with map argument, or deserialization. * These cases attempt to override the initial capacity settings, * but harmlessly fail to take effect in cases of races. * * The element count is maintained using a LongAdder, which avoids * contention on updates but can encounter cache thrashing if read * too frequently during concurrent access. To avoid reading so * often, resizing is attempted either when a bin lock is * contended, or upon adding to a bin already holding two or more * nodes (checked before adding in the xIfAbsent methods, after * adding in others). Under uniform hash distributions, the * probability of this occurring at threshold is around 13%, * meaning that only about 1 in 8 puts check threshold (and after * resizing, many fewer do so). But this approximation has high * variance for small table sizes, so we check on any collision * for sizes <= 64. 
The bulk putAll operation further reduces * contention by only committing count updates upon these size * checks. * * Maintaining API and serialization compatibility with previous * versions of this class introduces several oddities. Mainly: We * leave untouched but unused constructor arguments refering to * concurrencyLevel. We accept a loadFactor constructor argument, * but apply it only to initial table capacity (which is the only * time that we can guarantee to honor it.) We also declare an * unused "Segment" class that is instantiated in minimal form * only when serializing. */ /* ---------------- Constants -------------- */ /** * The largest possible table capacity. This value must be * exactly 1<<30 to stay within Java array allocation and indexing * bounds for power of two table sizes, and is further required * because the top two bits of 32bit hash fields are used for * control purposes. */ private static final int MAXIMUM_CAPACITY = 1 << 30; /** * The default initial table capacity. Must be a power of 2 * (i.e., at least 1) and at most MAXIMUM_CAPACITY. */ private static final int DEFAULT_CAPACITY = 16; /** * The largest possible (non-power of two) array size. * Needed by toArray and related methods. */ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; /** * The default concurrency level for this table. Unused but * defined for compatibility with previous versions of this class. */ private static final int DEFAULT_CONCURRENCY_LEVEL = 16; /** * The load factor for this table. Overrides of this value in * constructors affect only the initial table capacity. The * actual floating point value isn't normally used -- it is * simpler to use expressions such as {@code n - (n >>> 2)} for * the associated resizing threshold. */ private static final float LOAD_FACTOR = 0.75f; /** * The buffer size for skipped bins during transfers. The * value is arbitrary but should be large enough to avoid * most locking stalls during resizes. */ private static final int TRANSFER_BUFFER_SIZE = 32; /** * The bin count threshold for using a tree rather than list for a * bin. The value reflects the approximate break-even point for * using tree-based operations. * Note that Doug's version defaults to 8, but when dealing with * Ruby objects it is actually beneficial to avoid TreeNodes * as long as possible as it usually means going into Ruby land. */ private static final int TREE_THRESHOLD = 16; /* * Encodings for special uses of Node hash fields. See above for * explanation. */ static final int MOVED = 0x80000000; // hash field for forwarding nodes static final int LOCKED = 0x40000000; // set/tested only as a bit static final int WAITING = 0xc0000000; // both bits set/tested together static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash /* ---------------- Fields -------------- */ /** * The array of bins. Lazily initialized upon first insertion. * Size is always a power of two. Accessed directly by iterators. */ transient volatile AtomicReferenceArray table; /** * The counter maintaining number of elements. */ private transient LongAdder counter; /** * Table initialization and resizing control. When negative, the * table is being initialized or resized. Otherwise, when table is * null, holds the initial table size to use upon creation, or 0 * for default. After initialization, holds the next element count * value upon which to resize the table. 
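 * For example, with the default initial capacity of 16, the table is
 * created with sizeCtl == 12, i.e. 16 - (16 >>> 2), the 0.75 resize
 * threshold described above.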
*/ private transient volatile int sizeCtl; // views private transient KeySetView keySet; private transient ValuesView values; private transient EntrySetView entrySet; /** For serialization compatibility. Null unless serialized; see below */ private Segment[] segments; static AtomicIntegerFieldUpdater SIZE_CTRL_UPDATER = AtomicIntegerFieldUpdater.newUpdater(ConcurrentHashMapV8.class, "sizeCtl"); /* ---------------- Table element access -------------- */ /* * Volatile access methods are used for table elements as well as * elements of in-progress next table while resizing. Uses are * null checked by callers, and implicitly bounds-checked, relying * on the invariants that tab arrays have non-zero size, and all * indices are masked with (tab.length - 1) which is never * negative and always less than length. Note that, to be correct * wrt arbitrary concurrency errors by users, bounds checks must * operate on local variables, which accounts for some odd-looking * inline assignments below. */ static final Node tabAt(AtomicReferenceArray tab, int i) { // used by Iter return tab.get(i); } private static final boolean casTabAt(AtomicReferenceArray tab, int i, Node c, Node v) { return tab.compareAndSet(i, c, v); } private static final void setTabAt(AtomicReferenceArray tab, int i, Node v) { tab.set(i, v); } /* ---------------- Nodes -------------- */ /** * Key-value entry. Note that this is never exported out as a * user-visible Map.Entry (see MapEntry below). Nodes with a hash * field of MOVED are special, and do not contain user keys or * values. Otherwise, keys are never null, and null val fields * indicate that a node is in the process of being deleted or * created. For purposes of read-only access, a key may be read * before a val, but can only be used after checking val to be * non-null. */ static class Node { volatile int hash; final Object key; volatile Object val; volatile Node next; static AtomicIntegerFieldUpdater HASH_UPDATER = AtomicIntegerFieldUpdater.newUpdater(Node.class, "hash"); Node(int hash, Object key, Object val, Node next) { this.hash = hash; this.key = key; this.val = val; this.next = next; } /** CompareAndSet the hash field */ final boolean casHash(int cmp, int val) { return HASH_UPDATER.compareAndSet(this, cmp, val); } /** The number of spins before blocking for a lock */ static final int MAX_SPINS = Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; /** * Spins a while if LOCKED bit set and this node is the first * of its bin, and then sets WAITING bits on hash field and * blocks (once) if they are still set. It is OK for this * method to return even if lock is not available upon exit, * which enables these simple single-wait mechanics. * * The corresponding signalling operation is performed within * callers: Upon detecting that WAITING has been set when * unlocking lock (via a failed CAS from non-waiting LOCKED * state), unlockers acquire the sync lock and perform a * notifyAll. * * The initial sanity check on tab and bounds is not currently * necessary in the only usages of this method, but enables * use in other future contexts. 
*/ final void tryAwaitLock(AtomicReferenceArray tab, int i) { if (tab != null && i >= 0 && i < tab.length()) { // sanity check int r = ThreadLocalRandom.current().nextInt(); // randomize spins int spins = MAX_SPINS, h; while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { if (spins >= 0) { r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift if (r >= 0 && --spins == 0) Thread.yield(); // yield before block } else if (casHash(h, h | WAITING)) { synchronized (this) { if (tabAt(tab, i) == this && (hash & WAITING) == WAITING) { try { wait(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } else notifyAll(); // possibly won race vs signaller } break; } } } } } /* ---------------- TreeBins -------------- */ /** * Nodes for use in TreeBins */ static final class TreeNode extends Node { TreeNode parent; // red-black tree links TreeNode left; TreeNode right; TreeNode prev; // needed to unlink next upon deletion boolean red; TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { super(hash, key, val, next); this.parent = parent; } } /** * A specialized form of red-black tree for use in bins * whose size exceeds a threshold. * * TreeBins use a special form of comparison for search and * related operations (which is the main reason we cannot use * existing collections such as TreeMaps). TreeBins contain * Comparable elements, but may contain others, as well as * elements that are Comparable but not necessarily Comparable * for the same T, so we cannot invoke compareTo among them. To * handle this, the tree is ordered primarily by hash value, then * by getClass().getName() order, and then by Comparator order * among elements of the same class. On lookup at a node, if * elements are not comparable or compare as 0, both left and * right children may need to be searched in the case of tied hash * values. (This corresponds to the full list search that would be * necessary if all elements were non-Comparable and had tied * hashes.) The red-black balancing code is updated from * pre-jdk-collections * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) * based in turn on Cormen, Leiserson, and Rivest "Introduction to * Algorithms" (CLR). * * TreeBins also maintain a separate locking discipline than * regular bins. Because they are forwarded via special MOVED * nodes at bin heads (which can never change once established), * we cannot use those nodes as locks. Instead, TreeBin * extends AbstractQueuedSynchronizer to support a simple form of * read-write lock. For update operations and table validation, * the exclusive form of lock behaves in the same way as bin-head * locks. However, lookups use shared read-lock mechanics to allow * multiple readers in the absence of writers. Additionally, * these lookups do not ever block: While the lock is not * available, they proceed along the slow traversal path (via * next-pointers) until the lock becomes available or the list is * exhausted, whichever comes first. (These cases are not fast, * but maximize aggregate expected throughput.) The AQS mechanics * for doing this are straightforward. The lock state is held as * AQS getState(). Read counts are negative; the write count (1) * is positive. There are no signalling preferences among readers * and writers. Since we don't need to export full Lock API, we * just override the minimal AQS methods and use them directly. 
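 * For example, getState() == 0 means the bin is unlocked, 1 means a
 * writer holds exclusive access, and -2 means two readers currently
 * hold the shared read lock.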
*/ static final class TreeBin extends AbstractQueuedSynchronizer { private static final long serialVersionUID = 2249069246763182397L; transient TreeNode root; // root of tree transient TreeNode first; // head of next-pointer list /* AQS overrides */ public final boolean isHeldExclusively() { return getState() > 0; } public final boolean tryAcquire(int ignore) { if (compareAndSetState(0, 1)) { setExclusiveOwnerThread(Thread.currentThread()); return true; } return false; } public final boolean tryRelease(int ignore) { setExclusiveOwnerThread(null); setState(0); return true; } public final int tryAcquireShared(int ignore) { for (int c;;) { if ((c = getState()) > 0) return -1; if (compareAndSetState(c, c -1)) return 1; } } public final boolean tryReleaseShared(int ignore) { int c; do {} while (!compareAndSetState(c = getState(), c + 1)); return c == -1; } /** From CLR */ private void rotateLeft(TreeNode p) { if (p != null) { TreeNode r = p.right, pp, rl; if ((rl = p.right = r.left) != null) rl.parent = p; if ((pp = r.parent = p.parent) == null) root = r; else if (pp.left == p) pp.left = r; else pp.right = r; r.left = p; p.parent = r; } } /** From CLR */ private void rotateRight(TreeNode p) { if (p != null) { TreeNode l = p.left, pp, lr; if ((lr = p.left = l.right) != null) lr.parent = p; if ((pp = l.parent = p.parent) == null) root = l; else if (pp.right == p) pp.right = l; else pp.left = l; l.right = p; p.parent = l; } } @SuppressWarnings("unchecked") final TreeNode getTreeNode (int h, Object k, TreeNode p) { return getTreeNode(h, (RubyObject)k, p); } /** * Returns the TreeNode (or null if not found) for the given key * starting at given root. */ @SuppressWarnings("unchecked") final TreeNode getTreeNode (int h, RubyObject k, TreeNode p) { RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); while (p != null) { int dir, ph; RubyObject pk; RubyClass pc; if ((ph = p.hash) == h) { if ((pk = (RubyObject)p.key) == k || k.equals(pk)) return p; if (c != (pc = (RubyClass)pk.getMetaClass()) || kNotComparable || (dir = rubyCompare(k, pk)) == 0) { dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); if (dir == 0) { // if still stuck, need to check both sides TreeNode r = null, pl, pr; // try to recurse on the right if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) return r; // try to continue iterating on the left side else if ((pl = p.left) != null && h <= pl.hash) dir = -1; else // no matching node found return null; } } } else dir = (h < ph) ? -1 : 1; p = (dir > 0) ? p.right : p.left; } return null; } int rubyCompare(RubyObject l, RubyObject r) { ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); IRubyObject result; try { result = l.callMethod(context, "<=>", r); } catch (RaiseException e) { // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys if (context.runtime.getNoMethodError().isInstance(e.getException())) { return 0; } throw e; } return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); } /** * Wrapper for getTreeNode used by CHM.get. Tries to obtain * read-lock to call getTreeNode, but during failure to get * lock, searches along next links. 
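     * Readers CAS the AQS state downward to take the shared lock; when
     * the state is positive (write-locked) they instead walk the linear
     * next-chain, so this method never blocks.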
*/ final Object getValue(int h, Object k) { Node r = null; int c = getState(); // Must read lock state first for (Node e = first; e != null; e = e.next) { if (c <= 0 && compareAndSetState(c, c - 1)) { try { r = getTreeNode(h, k, root); } finally { releaseShared(0); } break; } else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { r = e; break; } else c = getState(); } return r == null ? null : r.val; } @SuppressWarnings("unchecked") final TreeNode putTreeNode (int h, Object k, Object v) { return putTreeNode(h, (RubyObject)k, v); } /** * Finds or adds a node. * @return null if added */ @SuppressWarnings("unchecked") final TreeNode putTreeNode (int h, RubyObject k, Object v) { RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); TreeNode pp = root, p = null; int dir = 0; while (pp != null) { // find existing node or leaf to insert at int ph; RubyObject pk; RubyClass pc; p = pp; if ((ph = p.hash) == h) { if ((pk = (RubyObject)p.key) == k || k.equals(pk)) return p; if (c != (pc = pk.getMetaClass()) || kNotComparable || (dir = rubyCompare(k, pk)) == 0) { dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); if (dir == 0) { // if still stuck, need to check both sides TreeNode r = null, pr; // try to recurse on the right if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) return r; else // continue descending down the left subtree dir = -1; } } } else dir = (h < ph) ? -1 : 1; pp = (dir > 0) ? p.right : p.left; } TreeNode f = first; TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); if (p == null) root = x; else { // attach and rebalance; adapted from CLR TreeNode xp, xpp; if (f != null) f.prev = x; if (dir <= 0) p.left = x; else p.right = x; x.red = true; while (x != null && (xp = x.parent) != null && xp.red && (xpp = xp.parent) != null) { TreeNode xppl = xpp.left; if (xp == xppl) { TreeNode y = xpp.right; if (y != null && y.red) { y.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.right) { rotateLeft(x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { xp.red = false; if (xpp != null) { xpp.red = true; rotateRight(xpp); } } } } else { TreeNode y = xppl; if (y != null && y.red) { y.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.left) { rotateRight(x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { xp.red = false; if (xpp != null) { xpp.red = true; rotateLeft(xpp); } } } } } TreeNode r = root; if (r != null && r.red) r.red = false; } return null; } /** * Removes the given node, that must be present before this * call. This is messier than typical red-black deletion code * because we cannot swap the contents of an interior node * with a leaf successor that is pinned by "next" pointers * that are accessible independently of lock. So instead we * swap the tree linkages. 
*/ final void deleteTreeNode(TreeNode p) { TreeNode next = (TreeNode)p.next; // unlink traversal pointers TreeNode pred = p.prev; if (pred == null) first = next; else pred.next = next; if (next != null) next.prev = pred; TreeNode replacement; TreeNode pl = p.left; TreeNode pr = p.right; if (pl != null && pr != null) { TreeNode s = pr, sl; while ((sl = s.left) != null) // find successor s = sl; boolean c = s.red; s.red = p.red; p.red = c; // swap colors TreeNode sr = s.right; TreeNode pp = p.parent; if (s == pr) { // p was s's direct parent p.parent = s; s.right = p; } else { TreeNode sp = s.parent; if ((p.parent = sp) != null) { if (s == sp.left) sp.left = p; else sp.right = p; } if ((s.right = pr) != null) pr.parent = s; } p.left = null; if ((p.right = sr) != null) sr.parent = p; if ((s.left = pl) != null) pl.parent = s; if ((s.parent = pp) == null) root = s; else if (p == pp.left) pp.left = s; else pp.right = s; replacement = sr; } else replacement = (pl != null) ? pl : pr; TreeNode pp = p.parent; if (replacement == null) { if (pp == null) { root = null; return; } replacement = p; } else { replacement.parent = pp; if (pp == null) root = replacement; else if (p == pp.left) pp.left = replacement; else pp.right = replacement; p.left = p.right = p.parent = null; } if (!p.red) { // rebalance, from CLR TreeNode x = replacement; while (x != null) { TreeNode xp, xpl; if (x.red || (xp = x.parent) == null) { x.red = false; break; } if (x == (xpl = xp.left)) { TreeNode sib = xp.right; if (sib != null && sib.red) { sib.red = false; xp.red = true; rotateLeft(xp); sib = (xp = x.parent) == null ? null : xp.right; } if (sib == null) x = xp; else { TreeNode sl = sib.left, sr = sib.right; if ((sr == null || !sr.red) && (sl == null || !sl.red)) { sib.red = true; x = xp; } else { if (sr == null || !sr.red) { if (sl != null) sl.red = false; sib.red = true; rotateRight(sib); sib = (xp = x.parent) == null ? null : xp.right; } if (sib != null) { sib.red = (xp == null) ? false : xp.red; if ((sr = sib.right) != null) sr.red = false; } if (xp != null) { xp.red = false; rotateLeft(xp); } x = root; } } } else { // symmetric TreeNode sib = xpl; if (sib != null && sib.red) { sib.red = false; xp.red = true; rotateRight(xp); sib = (xp = x.parent) == null ? null : xp.left; } if (sib == null) x = xp; else { TreeNode sl = sib.left, sr = sib.right; if ((sl == null || !sl.red) && (sr == null || !sr.red)) { sib.red = true; x = xp; } else { if (sl == null || !sl.red) { if (sr != null) sr.red = false; sib.red = true; rotateLeft(sib); sib = (xp = x.parent) == null ? null : xp.left; } if (sib != null) { sib.red = (xp == null) ? false : xp.red; if ((sl = sib.left) != null) sl.red = false; } if (xp != null) { xp.red = false; rotateRight(xp); } x = root; } } } } } if (p == replacement && (pp = p.parent) != null) { if (p == pp.left) // detach pointers pp.left = null; else if (p == pp.right) pp.right = null; p.parent = null; } } } /* ---------------- Collision reduction methods -------------- */ /** * Spreads higher bits to lower, and also forces top 2 bits to 0. * Because the table uses power-of-two masking, sets of hashes * that vary only in bits above the current mask will always * collide. (Among known examples are sets of Float keys holding * consecutive whole numbers in small tables.) To counter this, * we apply a transform that spreads the impact of higher bits * downward. There is a tradeoff between speed, utility, and * quality of bit-spreading. 
Because many common sets of hashes * are already reasonably distributed across bits (so don't benefit * from spreading), and because we use trees to handle large sets * of collisions in bins, we don't need excessively high quality. */ private static final int spread(int h) { h ^= (h >>> 18) ^ (h >>> 12); return (h ^ (h >>> 10)) & HASH_BITS; } /** * Replaces a list bin with a tree bin. Call only when locked. * Fails to replace if the given key is non-comparable or table * is, or needs, resizing. */ private final void replaceWithTreeBin(AtomicReferenceArray tab, int index, Object key) { if ((key instanceof Comparable) && (tab.length() >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { TreeBin t = new TreeBin(); for (Node e = tabAt(tab, index); e != null; e = e.next) t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); setTabAt(tab, index, new Node(MOVED, t, null, null)); } } /* ---------------- Internal access and update methods -------------- */ /** Implementation for get and containsKey */ private final Object internalGet(Object k) { int h = spread(k.hashCode()); retry: for (AtomicReferenceArray tab = table; tab != null;) { Node e, p; Object ek, ev; int eh; // locals to read fields once for (e = tabAt(tab, (tab.length() - 1) & h); e != null; e = e.next) { if ((eh = e.hash) == MOVED) { if ((ek = e.key) instanceof TreeBin) // search TreeBin return ((TreeBin)ek).getValue(h, k); else { // restart with new table tab = (AtomicReferenceArray)ek; continue retry; } } else if ((eh & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) return ev; } break; } return null; } /** * Implementation for the four public remove/replace methods: * Replaces node value with v, conditional upon match of cv if * non-null. If resulting value is null, delete. 
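     *
     * For reference, the four public methods map onto this as:
     * <pre> {@code
     * remove(k)          -> internalReplace(k, null, null)
     * remove(k, v)       -> internalReplace(k, null, v) != null
     * replace(k, v)      -> internalReplace(k, v, null)
     * replace(k, ov, nv) -> internalReplace(k, nv, ov) != null
     * }</pre>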
*/ private final Object internalReplace(Object k, Object v, Object cv) { int h = spread(k.hashCode()); Object oldVal = null; for (AtomicReferenceArray tab = table;;) { Node f; int i, fh; Object fk; if (tab == null || (f = tabAt(tab, i = (tab.length() - 1) & h)) == null) break; else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean validated = false; boolean deleted = false; t.acquire(0); try { if (tabAt(tab, i) == f) { validated = true; TreeNode p = t.getTreeNode(h, k, t.root); if (p != null) { Object pv = p.val; if (cv == null || cv == pv || cv.equals(pv)) { oldVal = pv; if ((p.val = v) == null) { deleted = true; t.deleteTreeNode(p); } } } } } finally { t.release(0); } if (validated) { if (deleted) counter.add(-1L); break; } } else tab = (AtomicReferenceArray)fk; } else if ((fh & HASH_BITS) != h && f.next == null) // precheck break; // rules out possible existence else if ((fh & LOCKED) != 0) { checkForResize(); // try resizing if can't get lock f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { boolean validated = false; boolean deleted = false; try { if (tabAt(tab, i) == f) { validated = true; for (Node e = f, pred = null;;) { Object ek, ev; if ((e.hash & HASH_BITS) == h && ((ev = e.val) != null) && ((ek = e.key) == k || k.equals(ek))) { if (cv == null || cv == ev || cv.equals(ev)) { oldVal = ev; if ((e.val = v) == null) { deleted = true; Node en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } } break; } pred = e; if ((e = e.next) == null) break; } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (validated) { if (deleted) counter.add(-1L); break; } } } return oldVal; } /* * Internal versions of the six insertion methods, each a * little more complicated than the last. All have * the same basic structure as the first (internalPut): * 1. If table uninitialized, create * 2. If bin empty, try to CAS new node * 3. If bin stale, use new table * 4. if bin converted to TreeBin, validate and relay to TreeBin methods * 5. Lock and validate; if valid, scan and add or update * * The others interweave other checks and/or alternative actions: * * Plain put checks for and performs resize after insertion. * * putIfAbsent prescans for mapping without lock (and fails to add * if present), which also makes pre-emptive resize checks worthwhile. * * computeIfAbsent extends form used in putIfAbsent with additional * mechanics to deal with, calls, potential exceptions and null * returns from function call. * * compute uses the same function-call mechanics, but without * the prescans * * merge acts as putIfAbsent in the absent case, but invokes the * update function if present * * putAll attempts to pre-allocate enough table space * and more lazily performs count updates and checks. * * Someday when details settle down a bit more, it might be worth * some factoring to reduce sprawl. 
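     *
     * Sketched as a loop (simplified; TreeBin relay, counting, and
     * resize checks omitted -- locals as in internalPut below):
     *
     *   for (AtomicReferenceArray tab = table;;) {
     *     if (tab == null)
     *       tab = initTable();                                // 1. create
     *     else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) {
     *       if (casTabAt(tab, i, null, new Node(h, k, v, null)))
     *         break;                                          // 2. CAS into empty bin
     *     }
     *     else if ((fh = f.hash) == MOVED)
     *       tab = (AtomicReferenceArray)f.key;                // 3. follow forwarder
     *     else if (f.casHash(fh, fh | LOCKED)) {
     *       // 5. locked: re-validate head, then scan and add or update
     *     }
     *   }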
*/ /** Implementation for put */ private final Object internalPut(Object k, Object v) { int h = spread(k.hashCode()); int count = 0; for (AtomicReferenceArray tab = table;;) { int i; Node f; int fh; Object fk; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node(h, k, v, null))) break; // no lock when adding to empty bin } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; Object oldVal = null; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 2; TreeNode p = t.putTreeNode(h, k, v); if (p != null) { oldVal = p.val; p.val = v; } } } finally { t.release(0); } if (count != 0) { if (oldVal != null) return oldVal; break; } } else tab = (AtomicReferenceArray)fk; } else if ((fh & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { Object oldVal = null; try { // needed in case equals() throws if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { oldVal = ev; e.val = v; break; } Node last = e; if ((e = e.next) == null) { last.next = new Node(h, k, v, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { // unlock and signal if needed if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (oldVal != null) return oldVal; if (tab.length() <= 64) count = 2; break; } } } counter.add(1L); if (count > 1) checkForResize(); return null; } /** Implementation for putIfAbsent */ private final Object internalPutIfAbsent(Object k, Object v) { int h = spread(k.hashCode()); int count = 0; for (AtomicReferenceArray tab = table;;) { int i; Node f; int fh; Object fk, fv; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node(h, k, v, null))) break; } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; Object oldVal = null; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 2; TreeNode p = t.putTreeNode(h, k, v); if (p != null) oldVal = p.val; } } finally { t.release(0); } if (count != 0) { if (oldVal != null) return oldVal; break; } } else tab = (AtomicReferenceArray)fk; } else if ((fh & HASH_BITS) == h && (fv = f.val) != null && ((fk = f.key) == k || k.equals(fk))) return fv; else { Node g = f.next; if (g != null) { // at least 2 nodes -- search and maybe resize for (Node e = g;;) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) return ev; if ((e = e.next) == null) { checkForResize(); break; } } } if (((fh = f.hash) & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { Object oldVal = null; try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { oldVal = ev; break; } Node last = e; if ((e = e.next) == null) { last.next = new Node(h, k, v, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (oldVal != null) return oldVal; if (tab.length() <= 64) count = 2; break; } } } } counter.add(1L); if 
(count > 1) checkForResize(); return null; } /** Implementation for computeIfAbsent */ private final Object internalComputeIfAbsent(K k, Fun mf) { int h = spread(k.hashCode()); Object val = null; int count = 0; for (AtomicReferenceArray tab = table;;) { Node f; int i, fh; Object fk, fv; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { Node node = new Node(fh = h | LOCKED, k, null, null); if (casTabAt(tab, i, null, node)) { count = 1; try { if ((val = mf.apply(k)) != null) node.val = val; } finally { if (val == null) setTabAt(tab, i, null); if (!node.casHash(fh, h)) { node.hash = h; synchronized (node) { node.notifyAll(); }; } } } if (count != 0) break; } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean added = false; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 1; TreeNode p = t.getTreeNode(h, k, t.root); if (p != null) val = p.val; else if ((val = mf.apply(k)) != null) { added = true; count = 2; t.putTreeNode(h, k, val); } } } finally { t.release(0); } if (count != 0) { if (!added) return val; break; } } else tab = (AtomicReferenceArray)fk; } else if ((fh & HASH_BITS) == h && (fv = f.val) != null && ((fk = f.key) == k || k.equals(fk))) return fv; else { Node g = f.next; if (g != null) { for (Node e = g;;) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) return ev; if ((e = e.next) == null) { checkForResize(); break; } } } if (((fh = f.hash) & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { boolean added = false; try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { val = ev; break; } Node last = e; if ((e = e.next) == null) { if ((val = mf.apply(k)) != null) { added = true; last.next = new Node(h, k, val, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); } break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (!added) return val; if (tab.length() <= 64) count = 2; break; } } } } if (val != null) { counter.add(1L); if (count > 1) checkForResize(); } return val; } /** Implementation for compute */ @SuppressWarnings("unchecked") private final Object internalCompute (K k, boolean onlyIfPresent, BiFun mf) { int h = spread(k.hashCode()); Object val = null; int delta = 0; int count = 0; for (AtomicReferenceArray tab = table;;) { Node f; int i, fh; Object fk; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { if (onlyIfPresent) break; Node node = new Node(fh = h | LOCKED, k, null, null); if (casTabAt(tab, i, null, node)) { try { count = 1; if ((val = mf.apply(k, null)) != null) { node.val = val; delta = 1; } } finally { if (delta == 0) setTabAt(tab, i, null); if (!node.casHash(fh, h)) { node.hash = h; synchronized (node) { node.notifyAll(); }; } } } if (count != 0) break; } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 1; TreeNode p = t.getTreeNode(h, k, t.root); Object pv; if (p == null) { if (onlyIfPresent) break; pv = null; } else pv = p.val; if ((val = mf.apply(k, (V)pv)) != null) { if (p != null) p.val = val; else { count = 2; delta = 1; t.putTreeNode(h, k, val); } 
} else if (p != null) { delta = -1; t.deleteTreeNode(p); } } } finally { t.release(0); } if (count != 0) break; } else tab = (AtomicReferenceArray)fk; } else if ((fh & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f, pred = null;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { val = mf.apply(k, (V)ev); if (val != null) e.val = val; else { delta = -1; Node en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } break; } pred = e; if ((e = e.next) == null) { if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { pred.next = new Node(h, k, val, null); delta = 1; if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); } break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (tab.length() <= 64) count = 2; break; } } } if (delta != 0) { counter.add((long)delta); if (count > 1) checkForResize(); } return val; } /** Implementation for merge */ @SuppressWarnings("unchecked") private final Object internalMerge (K k, V v, BiFun mf) { int h = spread(k.hashCode()); Object val = null; int delta = 0; int count = 0; for (AtomicReferenceArray tab = table;;) { int i; Node f; int fh; Object fk, fv; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node(h, k, v, null))) { delta = 1; val = v; break; } } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 1; TreeNode p = t.getTreeNode(h, k, t.root); val = (p == null) ? 
v : mf.apply((V)p.val, v); if (val != null) { if (p != null) p.val = val; else { count = 2; delta = 1; t.putTreeNode(h, k, val); } } else if (p != null) { delta = -1; t.deleteTreeNode(p); } } } finally { t.release(0); } if (count != 0) break; } else tab = (AtomicReferenceArray)fk; } else if ((fh & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f, pred = null;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { val = mf.apply((V)ev, v); if (val != null) e.val = val; else { delta = -1; Node en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } break; } pred = e; if ((e = e.next) == null) { val = v; pred.next = new Node(h, k, val, null); delta = 1; if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (tab.length() <= 64) count = 2; break; } } } if (delta != 0) { counter.add((long)delta); if (count > 1) checkForResize(); } return val; } /** Implementation for putAll */ private final void internalPutAll(Map m) { tryPresize(m.size()); long delta = 0L; // number of uncommitted additions boolean npe = false; // to throw exception on exit for nulls try { // to clean up counts on other exceptions for (Map.Entry entry : m.entrySet()) { Object k, v; if (entry == null || (k = entry.getKey()) == null || (v = entry.getValue()) == null) { npe = true; break; } int h = spread(k.hashCode()); for (AtomicReferenceArray tab = table;;) { int i; Node f; int fh; Object fk; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length() - 1) & h)) == null){ if (casTabAt(tab, i, null, new Node(h, k, v, null))) { ++delta; break; } } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean validated = false; t.acquire(0); try { if (tabAt(tab, i) == f) { validated = true; TreeNode p = t.getTreeNode(h, k, t.root); if (p != null) p.val = v; else { t.putTreeNode(h, k, v); ++delta; } } } finally { t.release(0); } if (validated) break; } else tab = (AtomicReferenceArray)fk; } else if ((fh & LOCKED) != 0) { counter.add(delta); delta = 0L; checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { int count = 0; try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { e.val = v; break; } Node last = e; if ((e = e.next) == null) { ++delta; last.next = new Node(h, k, v, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (count > 1) { counter.add(delta); delta = 0L; checkForResize(); } break; } } } } } finally { if (delta != 0) counter.add(delta); } if (npe) throw new NullPointerException(); } /* ---------------- Table Initialization and Resizing -------------- */ /** * Returns a power of two table size for the given desired capacity. * See Hackers Delight, sec 3.2 */ private static final int tableSizeFor(int c) { int n = c - 1; n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; } /** * Initializes table, using the size recorded in sizeCtl. 
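     * The sizeCtl field doubles as the coordination flag: it is -1 while
     * some thread is initializing or resizing, and otherwise holds the
     * next resize threshold. For example, for the default capacity
     * n = 16 the threshold becomes n - (n >>> 2) = 12, i.e. 0.75 * n.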
*/ private final AtomicReferenceArray initTable() { AtomicReferenceArray tab; int sc; while ((tab = table) == null) { if ((sc = sizeCtl) < 0) Thread.yield(); // lost initialization race; just spin else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { try { if ((tab = table) == null) { int n = (sc > 0) ? sc : DEFAULT_CAPACITY; tab = table = new AtomicReferenceArray(n); sc = n - (n >>> 2); } } finally { sizeCtl = sc; } break; } } return tab; } /** * If table is too small and not already resizing, creates next * table and transfers bins. Rechecks occupancy after a transfer * to see if another resize is already needed because resizings * are lagging additions. */ private final void checkForResize() { AtomicReferenceArray tab; int n, sc; while ((tab = table) != null && (n = tab.length()) < MAXIMUM_CAPACITY && (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { try { if (tab == table) { table = rebuild(tab); sc = (n << 1) - (n >>> 1); } } finally { sizeCtl = sc; } } } /** * Tries to presize table to accommodate the given number of elements. * * @param size number of elements (doesn't need to be perfectly accurate) */ private final void tryPresize(int size) { int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(size + (size >>> 1) + 1); int sc; while ((sc = sizeCtl) >= 0) { AtomicReferenceArray tab = table; int n; if (tab == null || (n = tab.length()) == 0) { n = (sc > c) ? sc : c; if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { try { if (table == tab) { table = new AtomicReferenceArray(n); sc = n - (n >>> 2); } } finally { sizeCtl = sc; } } } else if (c <= sc || n >= MAXIMUM_CAPACITY) break; else if (SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { try { if (table == tab) { table = rebuild(tab); sc = (n << 1) - (n >>> 1); } } finally { sizeCtl = sc; } } } } /* * Moves and/or copies the nodes in each bin to new table. See * above for explanation. 
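     * Bins already transferred are stamped with a forwarding node whose
     * hash is MOVED and whose key references the next table; readers
     * that encounter one (see internalGet) simply retry against that
     * table.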
* * @return the new table */ private static final AtomicReferenceArray rebuild(AtomicReferenceArray tab) { int n = tab.length(); AtomicReferenceArray nextTab = new AtomicReferenceArray(n << 1); Node fwd = new Node(MOVED, nextTab, null, null); int[] buffer = null; // holds bins to revisit; null until needed Node rev = null; // reverse forwarder; null until needed int nbuffered = 0; // the number of bins in buffer list int bufferIndex = 0; // buffer index of current buffered bin int bin = n - 1; // current non-buffered bin or -1 if none for (int i = bin;;) { // start upwards sweep int fh; Node f; if ((f = tabAt(tab, i)) == null) { if (bin >= 0) { // Unbuffered; no lock needed (or available) if (!casTabAt(tab, i, f, fwd)) continue; } else { // transiently use a locked forwarding node Node g = new Node(MOVED|LOCKED, nextTab, null, null); if (!casTabAt(tab, i, f, g)) continue; setTabAt(nextTab, i, null); setTabAt(nextTab, i + n, null); setTabAt(tab, i, fwd); if (!g.casHash(MOVED|LOCKED, MOVED)) { g.hash = MOVED; synchronized (g) { g.notifyAll(); } } } } else if ((fh = f.hash) == MOVED) { Object fk = f.key; if (fk instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean validated = false; t.acquire(0); try { if (tabAt(tab, i) == f) { validated = true; splitTreeBin(nextTab, i, t); setTabAt(tab, i, fwd); } } finally { t.release(0); } if (!validated) continue; } } else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { boolean validated = false; try { // split to lo and hi lists; copying as needed if (tabAt(tab, i) == f) { validated = true; splitBin(nextTab, i, f); setTabAt(tab, i, fwd); } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (!validated) continue; } else { if (buffer == null) // initialize buffer for revisits buffer = new int[TRANSFER_BUFFER_SIZE]; if (bin < 0 && bufferIndex > 0) { int j = buffer[--bufferIndex]; buffer[bufferIndex] = i; i = j; // swap with another bin continue; } if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { f.tryAwaitLock(tab, i); continue; // no other options -- block } if (rev == null) // initialize reverse-forwarder rev = new Node(MOVED, tab, null, null); if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) continue; // recheck before adding to list buffer[nbuffered++] = i; setTabAt(nextTab, i, rev); // install place-holders setTabAt(nextTab, i + n, rev); } if (bin > 0) i = --bin; else if (buffer != null && nbuffered > 0) { bin = -1; i = buffer[bufferIndex = --nbuffered]; } else return nextTab; } } /** * Splits a normal bin with list headed by e into lo and hi parts; * installs in given table. */ private static void splitBin(AtomicReferenceArray nextTab, int i, Node e) { int bit = nextTab.length() >>> 1; // bit to split on int runBit = e.hash & bit; Node lastRun = e, lo = null, hi = null; for (Node p = e.next; p != null; p = p.next) { int b = p.hash & bit; if (b != runBit) { runBit = b; lastRun = p; } } if (runBit == 0) lo = lastRun; else hi = lastRun; for (Node p = e; p != lastRun; p = p.next) { int ph = p.hash & HASH_BITS; Object pk = p.key, pv = p.val; if ((ph & bit) == 0) lo = new Node(ph, pk, pv, lo); else hi = new Node(ph, pk, pv, hi); } setTabAt(nextTab, i, lo); setTabAt(nextTab, i + bit, hi); } /** * Splits a tree bin into lo and hi parts; installs in given table. 
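     * A half that ends up holding no more than (TREE_THRESHOLD >>> 1)
     * nodes is rebuilt as a plain linked list, undoing treeification
     * once the collision population has dropped.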
*/ private static void splitTreeBin(AtomicReferenceArray nextTab, int i, TreeBin t) { int bit = nextTab.length() >>> 1; TreeBin lt = new TreeBin(); TreeBin ht = new TreeBin(); int lc = 0, hc = 0; for (Node e = t.first; e != null; e = e.next) { int h = e.hash & HASH_BITS; Object k = e.key, v = e.val; if ((h & bit) == 0) { ++lc; lt.putTreeNode(h, k, v); } else { ++hc; ht.putTreeNode(h, k, v); } } Node ln, hn; // throw away trees if too small if (lc <= (TREE_THRESHOLD >>> 1)) { ln = null; for (Node p = lt.first; p != null; p = p.next) ln = new Node(p.hash, p.key, p.val, ln); } else ln = new Node(MOVED, lt, null, null); setTabAt(nextTab, i, ln); if (hc <= (TREE_THRESHOLD >>> 1)) { hn = null; for (Node p = ht.first; p != null; p = p.next) hn = new Node(p.hash, p.key, p.val, hn); } else hn = new Node(MOVED, ht, null, null); setTabAt(nextTab, i + bit, hn); } /** * Implementation for clear. Steps through each bin, removing all * nodes. */ private final void internalClear() { long delta = 0L; // negative number of deletions int i = 0; AtomicReferenceArray tab = table; while (tab != null && i < tab.length()) { int fh; Object fk; Node f = tabAt(tab, i); if (f == null) ++i; else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; t.acquire(0); try { if (tabAt(tab, i) == f) { for (Node p = t.first; p != null; p = p.next) { if (p.val != null) { // (currently always true) p.val = null; --delta; } } t.first = null; t.root = null; ++i; } } finally { t.release(0); } } else tab = (AtomicReferenceArray)fk; } else if ((fh & LOCKED) != 0) { counter.add(delta); // opportunistically update count delta = 0L; f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { try { if (tabAt(tab, i) == f) { for (Node e = f; e != null; e = e.next) { if (e.val != null) { // (currently always true) e.val = null; --delta; } } setTabAt(tab, i, null); ++i; } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } } } if (delta != 0) counter.add(delta); } /* ----------------Table Traversal -------------- */ /** * Encapsulates traversal for methods such as containsValue; also * serves as a base class for other iterators and bulk tasks. * * At each step, the iterator snapshots the key ("nextKey") and * value ("nextVal") of a valid node (i.e., one that, at point of * snapshot, has a non-null user value). Because val fields can * change (including to null, indicating deletion), field nextVal * might not be accurate at point of use, but still maintains the * weak consistency property of holding a value that was once * valid. To support iterator.remove, the nextKey field is not * updated (nulled out) when the iterator cannot advance. * * Internal traversals directly access these fields, as in: * {@code while (it.advance() != null) { process(it.nextKey); }} * * Exported iterators must track whether the iterator has advanced * (in hasNext vs next) (by setting/checking/nulling field * nextVal), and then extract key, value, or key-value pairs as * return values of next(). * * The iterator visits once each still-valid node that was * reachable upon iterator construction. It might miss some that * were added to a bin after the bin was visited, which is OK wrt * consistency guarantees. Maintaining this property in the face * of possible ongoing resizes requires a fair amount of * bookkeeping state that is difficult to optimize away amidst * volatile accesses. Even so, traversal maintains reasonable * throughput. 
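     *
     * For example, an exported iterator (as handed out by methods such
     * as keys()) is consumed as:
     * <pre> {@code
     * Enumeration ks = map.keys();
     * while (ks.hasMoreElements())
     *   process(ks.nextElement());   // weakly consistent view
     * }</pre>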
* * Normally, iteration proceeds bin-by-bin traversing lists. * However, if the table has been resized, then all future steps * must traverse both the bin at the current index as well as at * (index + baseSize); and so on for further resizings. To * paranoically cope with potential sharing by users of iterators * across threads, iteration terminates if a bounds checks fails * for a table read. * * This class extends ForkJoinTask to streamline parallel * iteration in bulk operations (see BulkTask). This adds only an * int of space overhead, which is close enough to negligible in * cases where it is not needed to not worry about it. Because * ForkJoinTask is Serializable, but iterators need not be, we * need to add warning suppressions. */ @SuppressWarnings("serial") static class Traverser { final ConcurrentHashMapV8 map; Node next; // the next entry to use K nextKey; // cached key field of next V nextVal; // cached val field of next AtomicReferenceArray tab; // current table; updated if resized int index; // index of bin to use next int baseIndex; // current index of initial table int baseLimit; // index bound for initial table int baseSize; // initial table size /** Creates iterator for all entries in the table. */ Traverser(ConcurrentHashMapV8 map) { this.map = map; } /** Creates iterator for split() methods */ Traverser(Traverser it) { ConcurrentHashMapV8 m; AtomicReferenceArray t; if ((m = this.map = it.map) == null) t = null; else if ((t = it.tab) == null && // force parent tab initialization (t = it.tab = m.table) != null) it.baseLimit = it.baseSize = t.length(); this.tab = t; this.baseSize = it.baseSize; it.baseLimit = this.index = this.baseIndex = ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; } /** * Advances next; returns nextVal or null if terminated. * See above for explanation. */ final V advance() { Node e = next; V ev = null; outer: do { if (e != null) // advance past used/skipped node e = e.next; while (e == null) { // get to next non-null bin ConcurrentHashMapV8 m; AtomicReferenceArray t; int b, i, n; Object ek; // checks must use locals if ((t = tab) != null) n = t.length(); else if ((m = map) != null && (t = tab = m.table) != null) n = baseLimit = baseSize = t.length(); else break outer; if ((b = baseIndex) >= baseLimit || (i = index) < 0 || i >= n) break outer; if ((e = tabAt(t, i)) != null && e.hash == MOVED) { if ((ek = e.key) instanceof TreeBin) e = ((TreeBin)ek).first; else { tab = (AtomicReferenceArray)ek; continue; // restarts due to null val } } // visit upper slots if present index = (i += baseSize) < n ? i : (baseIndex = b + 1); } nextKey = (K) e.key; } while ((ev = (V) e.val) == null); // skip deleted or special nodes next = e; return nextVal = ev; } public final void remove() { Object k = nextKey; if (k == null && (advance() == null || (k = nextKey) == null)) throw new IllegalStateException(); map.internalReplace(k, null, null); } public final boolean hasNext() { return nextVal != null || advance() != null; } public final boolean hasMoreElements() { return hasNext(); } public final void setRawResult(Object x) { } public R getRawResult() { return null; } public boolean exec() { return true; } } /* ---------------- Public operations -------------- */ /** * Creates a new, empty map with the default initial table size (16). */ public ConcurrentHashMapV8() { this.counter = new LongAdder(); } /** * Creates a new, empty map with an initial table size * accommodating the specified number of elements without the need * to dynamically resize. 
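     *
     * <p>For example, {@code new ConcurrentHashMapV8(100)} sizes the
     * table to tableSizeFor(100 + (100 >>> 1) + 1) = 256, large enough
     * to hold 100 mappings without resizing at the effective 0.75 load
     * factor.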
* * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. * @throws IllegalArgumentException if the initial capacity of * elements is negative */ public ConcurrentHashMapV8(int initialCapacity) { if (initialCapacity < 0) throw new IllegalArgumentException(); int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); this.counter = new LongAdder(); this.sizeCtl = cap; } /** * Creates a new map with the same mappings as the given map. * * @param m the map */ public ConcurrentHashMapV8(Map m) { this.counter = new LongAdder(); this.sizeCtl = DEFAULT_CAPACITY; internalPutAll(m); } /** * Creates a new, empty map with an initial table size based on * the given number of elements ({@code initialCapacity}) and * initial table density ({@code loadFactor}). * * @param initialCapacity the initial capacity. The implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadFactor the load factor (table density) for * establishing the initial table size * @throws IllegalArgumentException if the initial capacity of * elements is negative or the load factor is nonpositive * * @since 1.6 */ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { this(initialCapacity, loadFactor, 1); } /** * Creates a new, empty map with an initial table size based on * the given number of elements ({@code initialCapacity}), table * density ({@code loadFactor}), and number of concurrently * updating threads ({@code concurrencyLevel}). * * @param initialCapacity the initial capacity. The implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadFactor the load factor (table density) for * establishing the initial table size * @param concurrencyLevel the estimated number of concurrently * updating threads. The implementation may use this value as * a sizing hint. * @throws IllegalArgumentException if the initial capacity is * negative or the load factor or concurrencyLevel are * nonpositive */ public ConcurrentHashMapV8(int initialCapacity, float loadFactor, int concurrencyLevel) { if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) throw new IllegalArgumentException(); if (initialCapacity < concurrencyLevel) // Use at least as many bins initialCapacity = concurrencyLevel; // as estimated threads long size = (long)(1.0 + (long)initialCapacity / loadFactor); int cap = (size >= (long)MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : tableSizeFor((int)size); this.counter = new LongAdder(); this.sizeCtl = cap; } /** * Creates a new {@link Set} backed by a ConcurrentHashMapV8 * from the given type to {@code Boolean.TRUE}. * * @return the new set */ public static KeySetView newKeySet() { return new KeySetView(new ConcurrentHashMapV8(), Boolean.TRUE); } /** * Creates a new {@link Set} backed by a ConcurrentHashMapV8 * from the given type to {@code Boolean.TRUE}. * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. 
* @throws IllegalArgumentException if the initial capacity of * elements is negative * @return the new set */ public static KeySetView newKeySet(int initialCapacity) { return new KeySetView(new ConcurrentHashMapV8(initialCapacity), Boolean.TRUE); } /** * {@inheritDoc} */ public boolean isEmpty() { return counter.sum() <= 0L; // ignore transient negative values } /** * {@inheritDoc} */ public int size() { long n = counter.sum(); return ((n < 0L) ? 0 : (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int)n); } /** * Returns the number of mappings. This method should be used * instead of {@link #size} because a ConcurrentHashMapV8 may * contain more mappings than can be represented as an int. The * value returned is a snapshot; the actual count may differ if * there are ongoing concurrent insertions or removals. * * @return the number of mappings */ public long mappingCount() { long n = counter.sum(); return (n < 0L) ? 0L : n; // ignore transient negative values } /** * Returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * *
<p>
More formally, if this map contains a mapping from a key * {@code k} to a value {@code v} such that {@code key.equals(k)}, * then this method returns {@code v}; otherwise it returns * {@code null}. (There can be at most one such mapping.) * * @throws NullPointerException if the specified key is null */ @SuppressWarnings("unchecked") public V get(Object key) { if (key == null) throw new NullPointerException(); return (V)internalGet(key); } /** * Returns the value to which the specified key is mapped, * or the given defaultValue if this map contains no mapping for the key. * * @param key the key * @param defaultValue the value to return if this map contains * no mapping for the given key * @return the mapping for the key, if present; else the defaultValue * @throws NullPointerException if the specified key is null */ @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { if (key == null) throw new NullPointerException(); V v = (V) internalGet(key); return v == null ? defaultValue : v; } /** * Tests if the specified object is a key in this table. * * @param key possible key * @return {@code true} if and only if the specified object * is a key in this table, as determined by the * {@code equals} method; {@code false} otherwise * @throws NullPointerException if the specified key is null */ public boolean containsKey(Object key) { if (key == null) throw new NullPointerException(); return internalGet(key) != null; } /** * Returns {@code true} if this map maps one or more keys to the * specified value. Note: This method may require a full traversal * of the map, and is much slower than method {@code containsKey}. * * @param value value whose presence in this map is to be tested * @return {@code true} if this map maps one or more keys to the * specified value * @throws NullPointerException if the specified value is null */ public boolean containsValue(Object value) { if (value == null) throw new NullPointerException(); Object v; Traverser it = new Traverser(this); while ((v = it.advance()) != null) { if (v == value || value.equals(v)) return true; } return false; } public K findKey(Object value) { if (value == null) throw new NullPointerException(); Object v; Traverser it = new Traverser(this); while ((v = it.advance()) != null) { if (v == value || value.equals(v)) return it.nextKey; } return null; } /** * Legacy method testing if some key maps into the specified value * in this table. This method is identical in functionality to * {@link #containsValue}, and exists solely to ensure * full compatibility with class {@link java.util.Hashtable}, * which supported this method prior to introduction of the * Java Collections framework. * * @param value a value to search for * @return {@code true} if and only if some key maps to the * {@code value} argument in this table as * determined by the {@code equals} method; * {@code false} otherwise * @throws NullPointerException if the specified value is null */ public boolean contains(Object value) { return containsValue(value); } /** * Maps the specified key to the specified value in this table. * Neither the key nor the value can be null. * *
<p>
The value can be retrieved by calling the {@code get} method * with a key that is equal to the original key. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @return the previous value associated with {@code key}, or * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key or value is null */ @SuppressWarnings("unchecked") public V put(K key, V value) { if (key == null || value == null) throw new NullPointerException(); return (V)internalPut(key, value); } /** * {@inheritDoc} * * @return the previous value associated with the specified key, * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { if (key == null || value == null) throw new NullPointerException(); return (V)internalPutIfAbsent(key, value); } /** * Copies all of the mappings from the specified map to this one. * These mappings replace any mappings that this map had for any of the * keys currently in the specified map. * * @param m mappings to be stored in this map */ public void putAll(Map m) { internalPutAll(m); } /** * If the specified key is not already associated with a value, * computes its value using the given mappingFunction and enters * it into the map unless null. This is equivalent to *
 <pre> {@code
     * if (map.containsKey(key))
     *   return map.get(key);
     * value = mappingFunction.apply(key);
     * if (value != null)
     *   map.put(key, value);
     * return value;}</pre>
* * except that the action is performed atomically. If the * function returns {@code null} no mapping is recorded. If the * function itself throws an (unchecked) exception, the exception * is rethrown to its caller, and no mapping is recorded. Some * attempted update operations on this map by other threads may be * blocked while computation is in progress, so the computation * should be short and simple, and must not attempt to update any * other mappings of this Map. The most appropriate usage is to * construct a new object serving as an initial mapped value, or * memoized result, as in: * *
 <pre> {@code
     * map.computeIfAbsent(key, new Fun<K, V>() {
     *   public V map(K k) { return new Value(f(k)); }});}</pre>
* * @param key key with which the specified value is to be associated * @param mappingFunction the function to compute a value * @return the current (existing or computed) value associated with * the specified key, or null if the computed value is null * @throws NullPointerException if the specified key or mappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the mappingFunction does so, * in which case the mapping is left unestablished */ @SuppressWarnings("unchecked") public V computeIfAbsent (K key, Fun mappingFunction) { if (key == null || mappingFunction == null) throw new NullPointerException(); return (V)internalComputeIfAbsent(key, mappingFunction); } /** * If the given key is present, computes a new mapping value given a key and * its current mapped value. This is equivalent to *
 <pre> {@code
     *   if (map.containsKey(key)) {
     *     value = remappingFunction.apply(key, map.get(key));
     *     if (value != null)
     *       map.put(key, value);
     *     else
     *       map.remove(key);
     *   }
     * }</pre>
* * except that the action is performed atomically. If the
     * function returns {@code null}, the mapping is removed. If the
     * function itself throws an (unchecked) exception, the exception
     * is rethrown to its caller, and the current mapping is left
     * unchanged. Some attempted update operations on this map by
     * other threads may be blocked while computation is in progress,
     * so the computation should be short and simple, and must not
     * attempt to update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    @SuppressWarnings("unchecked") public V computeIfPresent
        (K key, BiFun remappingFunction) {
        if (key == null || remappingFunction == null)
            throw new NullPointerException();
        return (V)internalCompute(key, true, remappingFunction);
    }

    /**
     * Computes a new mapping value given a key and
     * its current mapped value (or {@code null} if there is no current
     * mapping). This is equivalent to
     *
 <pre> {@code
     *   value = remappingFunction.apply(key, map.get(key));
     *   if (value != null)
     *     map.put(key, value);
     *   else
     *     map.remove(key);
     * }</pre>
* * except that the action is performed atomically. If the * function returns {@code null}, the mapping is removed. If the * function itself throws an (unchecked) exception, the exception * is rethrown to its caller, and the current mapping is left * unchanged. Some attempted update operations on this map by * other threads may be blocked while computation is in progress, * so the computation should be short and simple, and must not * attempt to update any other mappings of this Map. For example, * to either create or append new messages to a value mapping: * *
 <pre> {@code
     * Map<Key, String> map = ...;
     * final String msg = ...;
     * map.compute(key, new BiFun<Key, String, String>() {
     *   public String apply(Key k, String v) {
     *    return (v == null) ? msg : v + msg;}});}</pre>
* * @param key key with which the specified value is to be associated * @param remappingFunction the function to compute a value * @return the new value associated with the specified key, or null if none * @throws NullPointerException if the specified key or remappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the remappingFunction does so, * in which case the mapping is unchanged */ @SuppressWarnings("unchecked") public V compute (K key, BiFun remappingFunction) { if (key == null || remappingFunction == null) throw new NullPointerException(); return (V)internalCompute(key, false, remappingFunction); } /** * If the specified key is not already associated * with a value, associate it with the given value. * Otherwise, replace the value with the results of * the given remapping function. This is equivalent to: *
 <pre> {@code
     *   if (!map.containsKey(key))
     *     map.put(key, value);
     *   else {
     *     newValue = remappingFunction.apply(map.get(key), value);
     *     if (newValue != null)
     *       map.put(key, newValue);
     *     else
     *       map.remove(key);
     *   }
     * }</pre>
* except that the action is performed atomically. If the * function returns {@code null}, the mapping is removed. If the * function itself throws an (unchecked) exception, the exception * is rethrown to its caller, and the current mapping is left * unchanged. Some attempted update operations on this map by * other threads may be blocked while computation is in progress, * so the computation should be short and simple, and must not * attempt to update any other mappings of this Map. */ @SuppressWarnings("unchecked") public V merge (K key, V value, BiFun remappingFunction) { if (key == null || value == null || remappingFunction == null) throw new NullPointerException(); return (V)internalMerge(key, value, remappingFunction); } /** * Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. * * @param key the key that needs to be removed * @return the previous value associated with {@code key}, or * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key is null */ @SuppressWarnings("unchecked") public V remove(Object key) { if (key == null) throw new NullPointerException(); return (V)internalReplace(key, null, null); } /** * {@inheritDoc} * * @throws NullPointerException if the specified key is null */ public boolean remove(Object key, Object value) { if (key == null) throw new NullPointerException(); if (value == null) return false; return internalReplace(key, null, value) != null; } /** * {@inheritDoc} * * @throws NullPointerException if any of the arguments are null */ public boolean replace(K key, V oldValue, V newValue) { if (key == null || oldValue == null || newValue == null) throw new NullPointerException(); return internalReplace(key, newValue, oldValue) != null; } /** * {@inheritDoc} * * @return the previous value associated with the specified key, * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ @SuppressWarnings("unchecked") public V replace(K key, V value) { if (key == null || value == null) throw new NullPointerException(); return (V)internalReplace(key, value, null); } /** * Removes all of the mappings from this map. */ public void clear() { internalClear(); } /** * Returns a {@link Set} view of the keys contained in this map. * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. * * @return the set view */ public KeySetView keySet() { KeySetView ks = keySet; return (ks != null) ? ks : (keySet = new KeySetView(this, null)); } /** * Returns a {@link Set} view of the keys in this map, using the * given common mapped value for any additions (i.e., {@link * Collection#add} and {@link Collection#addAll}). This is of * course only appropriate if it is acceptable to use the same * value for all additions from this view. * * @param mappedValue the mapped value to use for any * additions. * @return the set view * @throws NullPointerException if the mappedValue is null */ public KeySetView keySet(V mappedValue) { if (mappedValue == null) throw new NullPointerException(); return new KeySetView(this, mappedValue); } /** * Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are * reflected in the collection, and vice-versa. */ public ValuesView values() { ValuesView vs = values; return (vs != null) ? 
vs : (values = new ValuesView(this)); } /** * Returns a {@link Set} view of the mappings contained in this map. * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. The set supports element * removal, which removes the corresponding mapping from the map, * via the {@code Iterator.remove}, {@code Set.remove}, * {@code removeAll}, {@code retainAll}, and {@code clear} * operations. It does not support the {@code add} or * {@code addAll} operations. * *
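     * <p>For illustration, a hypothetical pruning sketch (the map and its
     * {@code String}/{@code Long} type arguments are assumptions):
     * <pre> {@code
     * ConcurrentHashMapV8<String, Long> map = ...; // assumed to exist
     * Iterator<Map.Entry<String, Long>> it = map.entrySet().iterator();
     * while (it.hasNext()) {
     *   Map.Entry<String, Long> e = it.next();
     *   if (e.getValue().longValue() == 0L)
     *     it.remove(); // removes the mapping from the backing map
     * }
     * }</pre>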
The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. */ public Set> entrySet() { EntrySetView es = entrySet; return (es != null) ? es : (entrySet = new EntrySetView(this)); } /** * Returns an enumeration of the keys in this table. * * @return an enumeration of the keys in this table * @see #keySet() */ public Enumeration keys() { return new KeyIterator(this); } /** * Returns an enumeration of the values in this table. * * @return an enumeration of the values in this table * @see #values() */ public Enumeration elements() { return new ValueIterator(this); } /** * Returns a partitionable iterator of the keys in this map. * * @return a partitionable iterator of the keys in this map */ public Spliterator keySpliterator() { return new KeyIterator(this); } /** * Returns a partitionable iterator of the values in this map. * * @return a partitionable iterator of the values in this map */ public Spliterator valueSpliterator() { return new ValueIterator(this); } /** * Returns a partitionable iterator of the entries in this map. * * @return a partitionable iterator of the entries in this map */ public Spliterator> entrySpliterator() { return new EntryIterator(this); } /** * Returns the hash code value for this {@link Map}, i.e., * the sum of, for each key-value pair in the map, * {@code key.hashCode() ^ value.hashCode()}. * * @return the hash code value for this map */ public int hashCode() { int h = 0; Traverser it = new Traverser(this); Object v; while ((v = it.advance()) != null) { h += it.nextKey.hashCode() ^ v.hashCode(); } return h; } /** * Returns a string representation of this map. The string * representation consists of a list of key-value mappings (in no * particular order) enclosed in braces ("{@code {}}"). Adjacent * mappings are separated by the characters {@code ", "} (comma * and space). Each key-value mapping is rendered as the key * followed by an equals sign ("{@code =}") followed by the * associated value. * * @return a string representation of this map */ public String toString() { Traverser it = new Traverser(this); StringBuilder sb = new StringBuilder(); sb.append('{'); Object v; if ((v = it.advance()) != null) { for (;;) { Object k = it.nextKey; sb.append(k == this ? "(this Map)" : k); sb.append('='); sb.append(v == this ? "(this Map)" : v); if ((v = it.advance()) == null) break; sb.append(',').append(' '); } } return sb.append('}').toString(); } /** * Compares the specified object with this map for equality. * Returns {@code true} if the given object is a map with the same * mappings as this map. This operation may return misleading * results if either map is concurrently modified during execution * of this method. 
* * @param o object to be compared for equality with this map * @return {@code true} if the specified object is equal to this map */ public boolean equals(Object o) { if (o != this) { if (!(o instanceof Map)) return false; Map m = (Map) o; Traverser it = new Traverser(this); Object val; while ((val = it.advance()) != null) { Object v = m.get(it.nextKey); if (v == null || (v != val && !v.equals(val))) return false; } for (Map.Entry e : m.entrySet()) { Object mk, mv, v; if ((mk = e.getKey()) == null || (mv = e.getValue()) == null || (v = internalGet(mk)) == null || (mv != v && !mv.equals(v))) return false; } } return true; } /* ----------------Iterators -------------- */ @SuppressWarnings("serial") static final class KeyIterator extends Traverser implements Spliterator, Enumeration { KeyIterator(ConcurrentHashMapV8 map) { super(map); } KeyIterator(Traverser it) { super(it); } public KeyIterator split() { if (nextKey != null) throw new IllegalStateException(); return new KeyIterator(this); } @SuppressWarnings("unchecked") public final K next() { if (nextVal == null && advance() == null) throw new NoSuchElementException(); Object k = nextKey; nextVal = null; return (K) k; } public final K nextElement() { return next(); } } @SuppressWarnings("serial") static final class ValueIterator extends Traverser implements Spliterator, Enumeration { ValueIterator(ConcurrentHashMapV8 map) { super(map); } ValueIterator(Traverser it) { super(it); } public ValueIterator split() { if (nextKey != null) throw new IllegalStateException(); return new ValueIterator(this); } @SuppressWarnings("unchecked") public final V next() { Object v; if ((v = nextVal) == null && (v = advance()) == null) throw new NoSuchElementException(); nextVal = null; return (V) v; } public final V nextElement() { return next(); } } @SuppressWarnings("serial") static final class EntryIterator extends Traverser implements Spliterator> { EntryIterator(ConcurrentHashMapV8 map) { super(map); } EntryIterator(Traverser it) { super(it); } public EntryIterator split() { if (nextKey != null) throw new IllegalStateException(); return new EntryIterator(this); } @SuppressWarnings("unchecked") public final Map.Entry next() { Object v; if ((v = nextVal) == null && (v = advance()) == null) throw new NoSuchElementException(); Object k = nextKey; nextVal = null; return new MapEntry((K)k, (V)v, map); } } /** * Exported Entry for iterators */ static final class MapEntry implements Map.Entry { final K key; // non-null V val; // non-null final ConcurrentHashMapV8 map; MapEntry(K key, V val, ConcurrentHashMapV8 map) { this.key = key; this.val = val; this.map = map; } public final K getKey() { return key; } public final V getValue() { return val; } public final int hashCode() { return key.hashCode() ^ val.hashCode(); } public final String toString(){ return key + "=" + val; } public final boolean equals(Object o) { Object k, v; Map.Entry e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry)o).getKey()) != null && (v = e.getValue()) != null && (k == key || k.equals(key)) && (v == val || v.equals(val))); } /** * Sets our entry's value and writes through to the map. The * value to return is somewhat arbitrary here. Since we do not * necessarily track asynchronous changes, the most recent * "previous" value could be different from what we return (or * could even have been removed in which case the put will * re-establish). We do not and cannot guarantee more. 
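     * <p>A hypothetical write-through sketch (the map and its type
     * arguments are assumptions):
     * <pre> {@code
     * // given a ConcurrentHashMapV8<String, Long> map
     * for (Map.Entry<String, Long> e : map.entrySet()) {
     *   e.setValue(e.getValue() + 1L); // also updates the backing map
     * }
     * }</pre>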
*/ public final V setValue(V value) { if (value == null) throw new NullPointerException(); V v = val; val = value; map.put(key, value); return v; } } /* ---------------- Serialization Support -------------- */ /** * Stripped-down version of helper class used in previous version, * declared for the sake of serialization compatibility */ static class Segment implements Serializable { private static final long serialVersionUID = 2249069246763182397L; final float loadFactor; Segment(float lf) { this.loadFactor = lf; } } /** * Saves the state of the {@code ConcurrentHashMapV8} instance to a * stream (i.e., serializes it). * @param s the stream * @serialData * the key (Object) and value (Object) * for each key-value mapping, followed by a null pair. * The key-value mappings are emitted in no particular order. */ @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException { if (segments == null) { // for serialization compatibility segments = (Segment[]) new Segment[DEFAULT_CONCURRENCY_LEVEL]; for (int i = 0; i < segments.length; ++i) segments[i] = new Segment(LOAD_FACTOR); } s.defaultWriteObject(); Traverser it = new Traverser(this); Object v; while ((v = it.advance()) != null) { s.writeObject(it.nextKey); s.writeObject(v); } s.writeObject(null); s.writeObject(null); segments = null; // throw away } /** * Reconstitutes the instance from a stream (that is, deserializes it). * @param s the stream */ @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { s.defaultReadObject(); this.segments = null; // unneeded // initialize transient final field this.counter = new LongAdder(); // Create all nodes, then place in table once size is known long size = 0L; Node p = null; for (;;) { K k = (K) s.readObject(); V v = (V) s.readObject(); if (k != null && v != null) { int h = spread(k.hashCode()); p = new Node(h, k, v, p); ++size; } else break; } if (p != null) { boolean init = false; int n; if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) n = MAXIMUM_CAPACITY; else { int sz = (int)size; n = tableSizeFor(sz + (sz >>> 1) + 1); } int sc = sizeCtl; boolean collide = false; if (n > sc && SIZE_CTRL_UPDATER.compareAndSet(this, sc, -1)) { try { if (table == null) { init = true; AtomicReferenceArray tab = new AtomicReferenceArray(n); int mask = n - 1; while (p != null) { int j = p.hash & mask; Node next = p.next; Node q = p.next = tabAt(tab, j); setTabAt(tab, j, p); if (!collide && q != null && q.hash == p.hash) collide = true; p = next; } table = tab; counter.add(size); sc = n - (n >>> 2); } } finally { sizeCtl = sc; } if (collide) { // rescan and convert to TreeBins AtomicReferenceArray tab = table; for (int i = 0; i < tab.length(); ++i) { int c = 0; for (Node e = tabAt(tab, i); e != null; e = e.next) { if (++c > TREE_THRESHOLD && (e.key instanceof Comparable)) { replaceWithTreeBin(tab, i, e.key); break; } } } } } if (!init) { // Can only happen if unsafely published. 
while (p != null) { internalPut(p.key, p.val); p = p.next; } } } } // ------------------------------------------------------- // Sams /** Interface describing a void action of one argument */ public interface Action { void apply(A a); } /** Interface describing a void action of two arguments */ public interface BiAction { void apply(A a, B b); } /** Interface describing a function of one argument */ public interface Generator { T apply(); } /** Interface describing a function mapping its argument to a double */ public interface ObjectToDouble { double apply(A a); } /** Interface describing a function mapping its argument to a long */ public interface ObjectToLong { long apply(A a); } /** Interface describing a function mapping its argument to an int */ public interface ObjectToInt {int apply(A a); } /** Interface describing a function mapping two arguments to a double */ public interface ObjectByObjectToDouble { double apply(A a, B b); } /** Interface describing a function mapping two arguments to a long */ public interface ObjectByObjectToLong { long apply(A a, B b); } /** Interface describing a function mapping two arguments to an int */ public interface ObjectByObjectToInt {int apply(A a, B b); } /** Interface describing a function mapping a double to a double */ public interface DoubleToDouble { double apply(double a); } /** Interface describing a function mapping a long to a long */ public interface LongToLong { long apply(long a); } /** Interface describing a function mapping an int to an int */ public interface IntToInt { int apply(int a); } /** Interface describing a function mapping two doubles to a double */ public interface DoubleByDoubleToDouble { double apply(double a, double b); } /** Interface describing a function mapping two longs to a long */ public interface LongByLongToLong { long apply(long a, long b); } /** Interface describing a function mapping two ints to an int */ public interface IntByIntToInt { int apply(int a, int b); } /* ----------------Views -------------- */ /** * Base class for views. */ static abstract class CHMView { final ConcurrentHashMapV8 map; CHMView(ConcurrentHashMapV8 map) { this.map = map; } /** * Returns the map backing this view. * * @return the map backing this view */ public ConcurrentHashMapV8 getMap() { return map; } public final int size() { return map.size(); } public final boolean isEmpty() { return map.isEmpty(); } public final void clear() { map.clear(); } // implementations below rely on concrete classes supplying these abstract public Iterator iterator(); abstract public boolean contains(Object o); abstract public boolean remove(Object o); private static final String oomeMsg = "Required array size too large"; public final Object[] toArray() { long sz = map.mappingCount(); if (sz > (long)(MAX_ARRAY_SIZE)) throw new OutOfMemoryError(oomeMsg); int n = (int)sz; Object[] r = new Object[n]; int i = 0; Iterator it = iterator(); while (it.hasNext()) { if (i == n) { if (n >= MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) n = MAX_ARRAY_SIZE; else n += (n >>> 1) + 1; r = Arrays.copyOf(r, n); } r[i++] = it.next(); } return (i == n) ? r : Arrays.copyOf(r, i); } @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { long sz = map.mappingCount(); if (sz > (long)(MAX_ARRAY_SIZE)) throw new OutOfMemoryError(oomeMsg); int m = (int)sz; T[] r = (a.length >= m) ? 
a : (T[])java.lang.reflect.Array .newInstance(a.getClass().getComponentType(), m); int n = r.length; int i = 0; Iterator it = iterator(); while (it.hasNext()) { if (i == n) { if (n >= MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) n = MAX_ARRAY_SIZE; else n += (n >>> 1) + 1; r = Arrays.copyOf(r, n); } r[i++] = (T)it.next(); } if (a == r && i < n) { r[i] = null; // null-terminate return r; } return (i == n) ? r : Arrays.copyOf(r, i); } public final int hashCode() { int h = 0; for (Iterator it = iterator(); it.hasNext();) h += it.next().hashCode(); return h; } public final String toString() { StringBuilder sb = new StringBuilder(); sb.append('['); Iterator it = iterator(); if (it.hasNext()) { for (;;) { Object e = it.next(); sb.append(e == this ? "(this Collection)" : e); if (!it.hasNext()) break; sb.append(',').append(' '); } } return sb.append(']').toString(); } public final boolean containsAll(Collection c) { if (c != this) { for (Iterator it = c.iterator(); it.hasNext();) { Object e = it.next(); if (e == null || !contains(e)) return false; } } return true; } public final boolean removeAll(Collection c) { boolean modified = false; for (Iterator it = iterator(); it.hasNext();) { if (c.contains(it.next())) { it.remove(); modified = true; } } return modified; } public final boolean retainAll(Collection c) { boolean modified = false; for (Iterator it = iterator(); it.hasNext();) { if (!c.contains(it.next())) { it.remove(); modified = true; } } return modified; } } /** * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in * which additions may optionally be enabled by mapping to a * common value. This class cannot be directly instantiated. See * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, * {@link #newKeySet(int)}. */ public static class KeySetView extends CHMView implements Set, java.io.Serializable { private static final long serialVersionUID = 7249069246763182397L; private final V value; KeySetView(ConcurrentHashMapV8 map, V value) { // non-public super(map); this.value = value; } /** * Returns the default mapped value for additions, * or {@code null} if additions are not supported. * * @return the default mapped value for additions, or {@code null} * if not supported. */ public V getMappedValue() { return value; } // implement Set API public boolean contains(Object o) { return map.containsKey(o); } public boolean remove(Object o) { return map.remove(o) != null; } /** * Returns a "weakly consistent" iterator that will never * throw {@link ConcurrentModificationException}, and * guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not * guaranteed to) reflect any modifications subsequent to * construction. 
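     * <p>For example (a sketch; the map and element types are
     * assumptions), iteration tolerates concurrent updates rather than
     * failing fast:
     * <pre> {@code
     * // given a ConcurrentHashMapV8<String, Long> map
     * for (String k : map.keySet()) {
     *   map.remove(k); // never triggers ConcurrentModificationException
     * }
     * }</pre>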
* * @return an iterator over the keys of this map */ public Iterator iterator() { return new KeyIterator(map); } public boolean add(K e) { V v; if ((v = value) == null) throw new UnsupportedOperationException(); if (e == null) throw new NullPointerException(); return map.internalPutIfAbsent(e, v) == null; } public boolean addAll(Collection c) { boolean added = false; V v; if ((v = value) == null) throw new UnsupportedOperationException(); for (K e : c) { if (e == null) throw new NullPointerException(); if (map.internalPutIfAbsent(e, v) == null) added = true; } return added; } public boolean equals(Object o) { Set c; return ((o instanceof Set) && ((c = (Set)o) == this || (containsAll(c) && c.containsAll(this)))); } } /** * A view of a ConcurrentHashMapV8 as a {@link Collection} of * values, in which additions are disabled. This class cannot be * directly instantiated. See {@link #values}, * *
The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. */ public static final class ValuesView extends CHMView implements Collection { ValuesView(ConcurrentHashMapV8 map) { super(map); } public final boolean contains(Object o) { return map.containsValue(o); } public final boolean remove(Object o) { if (o != null) { Iterator it = new ValueIterator(map); while (it.hasNext()) { if (o.equals(it.next())) { it.remove(); return true; } } } return false; } /** * Returns a "weakly consistent" iterator that will never * throw {@link ConcurrentModificationException}, and * guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not * guaranteed to) reflect any modifications subsequent to * construction. * * @return an iterator over the values of this map */ public final Iterator iterator() { return new ValueIterator(map); } public final boolean add(V e) { throw new UnsupportedOperationException(); } public final boolean addAll(Collection c) { throw new UnsupportedOperationException(); } } /** * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) * entries. This class cannot be directly instantiated. See * {@link #entrySet}. */ public static final class EntrySetView extends CHMView implements Set> { EntrySetView(ConcurrentHashMapV8 map) { super(map); } public final boolean contains(Object o) { Object k, v, r; Map.Entry e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry)o).getKey()) != null && (r = map.get(k)) != null && (v = e.getValue()) != null && (v == r || v.equals(r))); } public final boolean remove(Object o) { Object k, v; Map.Entry e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry)o).getKey()) != null && (v = e.getValue()) != null && map.remove(k, v)); } /** * Returns a "weakly consistent" iterator that will never * throw {@link ConcurrentModificationException}, and * guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not * guaranteed to) reflect any modifications subsequent to * construction. * * @return an iterator over the entries of this map */ public final Iterator> iterator() { return new EntryIterator(map); } public final boolean add(Entry e) { K key = e.getKey(); V value = e.getValue(); if (key == null || value == null) throw new NullPointerException(); return map.internalPut(key, value) == null; } public final boolean addAll(Collection> c) { boolean added = false; for (Entry e : c) { if (add(e)) added = true; } return added; } public boolean equals(Object o) { Set c; return ((o instanceof Set) && ((c = (Set)o) == this || (containsAll(c) && c.containsAll(this)))); } } }thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/Striped64.java0000644000004100000410000003216012530443654025442 0ustar www-datawww-data/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ // This is based on 1.5 version. package org.jruby.ext.thread_safe.jsr166e; import java.util.Random; /** * A package-local class holding common representation and mechanics * for classes supporting dynamic striping on 64bit values. The class * extends Number so that concrete subclasses must publicly do so. 
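 * <p>A minimal hypothetical subclass sketch (the {@code LongMax} name
 * and its {@code update} method are assumptions; the fast path mirrors
 * the one used by {@link LongAdder}):
 * <pre> {@code
 * class LongMax extends Striped64 {
 *   // combine current and incoming value: keep the larger one
 *   final long fn(long v, long x) { return v >= x ? v : x; }
 *   // records the max of nonnegative updates (base starts at zero)
 *   public void update(long x) {
 *     Cell[] as; long b, v; HashCode hc; Cell a; int n;
 *     if ((as = cells) != null || !casBase(b = base, fn(b, x))) {
 *       boolean uncontended = true;
 *       int h = (hc = threadHashCode.get()).code;
 *       if (as == null || (n = as.length) < 1 ||
 *           (a = as[(n - 1) & h]) == null ||
 *           !(uncontended = a.cas(v = a.value, fn(v, x))))
 *         retryUpdate(x, hc, uncontended);
 *     }
 *   }
 * }
 * }</pre>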
*/ abstract class Striped64 extends Number { /* * This class maintains a lazily-initialized table of atomically * updated variables, plus an extra "base" field. The table size * is a power of two. Indexing uses masked per-thread hash codes. * Nearly all declarations in this class are package-private, * accessed directly by subclasses. * * Table entries are of class Cell; a variant of AtomicLong padded * to reduce cache contention on most processors. Padding is * overkill for most Atomics because they are usually irregularly * scattered in memory and thus don't interfere much with each * other. But Atomic objects residing in arrays will tend to be * placed adjacent to each other, and so will most often share * cache lines (with a huge negative performance impact) without * this precaution. * * In part because Cells are relatively large, we avoid creating * them until they are needed. When there is no contention, all * updates are made to the base field. Upon first contention (a * failed CAS on base update), the table is initialized to size 2. * The table size is doubled upon further contention until * reaching the nearest power of two greater than or equal to the * number of CPUS. Table slots remain empty (null) until they are * needed. * * A single spinlock ("busy") is used for initializing and * resizing the table, as well as populating slots with new Cells. * There is no need for a blocking lock: When the lock is not * available, threads try other slots (or the base). During these * retries, there is increased contention and reduced locality, * which is still better than alternatives. * * Per-thread hash codes are initialized to random values. * Contention and/or table collisions are indicated by failed * CASes when performing an update operation (see method * retryUpdate). Upon a collision, if the table size is less than * the capacity, it is doubled in size unless some other thread * holds the lock. If a hashed slot is empty, and lock is * available, a new Cell is created. Otherwise, if the slot * exists, a CAS is tried. Retries proceed by "double hashing", * using a secondary hash (Marsaglia XorShift) to try to find a * free slot. * * The table size is capped because, when there are more threads * than CPUs, supposing that each thread were bound to a CPU, * there would exist a perfect hash function mapping threads to * slots that eliminates collisions. When we reach capacity, we * search for this mapping by randomly varying the hash codes of * colliding threads. Because search is random, and collisions * only become known via CAS failures, convergence can be slow, * and because threads are typically not bound to CPUS forever, * may not occur at all. However, despite these limitations, * observed contention rates are typically low in these cases. * * It is possible for a Cell to become unused when threads that * once hashed to it terminate, as well as in the case where * doubling the table causes no thread to hash to it under * expanded mask. We do not try to detect or remove such cells, * under the assumption that for long-running instances, observed * contention levels will recur, so the cells will eventually be * needed again; and for short-lived ones, it does not matter. */ /** * Padded variant of AtomicLong supporting only raw accesses plus CAS. * The value field is placed between pads, hoping that the JVM doesn't * reorder them. * * JVM intrinsics note: It would be possible to use a release-only * form of CAS here, if it were provided. 
*/ static final class Cell { volatile long p0, p1, p2, p3, p4, p5, p6; volatile long value; volatile long q0, q1, q2, q3, q4, q5, q6; Cell(long x) { value = x; } final boolean cas(long cmp, long val) { return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); } // Unsafe mechanics private static final sun.misc.Unsafe UNSAFE; private static final long valueOffset; static { try { UNSAFE = getUnsafe(); Class ak = Cell.class; valueOffset = UNSAFE.objectFieldOffset (ak.getDeclaredField("value")); } catch (Exception e) { throw new Error(e); } } } /** * Holder for the thread-local hash code. The code is initially * random, but may be set to a different value upon collisions. */ static final class HashCode { static final Random rng = new Random(); int code; HashCode() { int h = rng.nextInt(); // Avoid zero to allow xorShift rehash code = (h == 0) ? 1 : h; } } /** * The corresponding ThreadLocal class */ static final class ThreadHashCode extends ThreadLocal { public HashCode initialValue() { return new HashCode(); } } /** * Static per-thread hash codes. Shared across all instances to * reduce ThreadLocal pollution and because adjustments due to * collisions in one table are likely to be appropriate for * others. */ static final ThreadHashCode threadHashCode = new ThreadHashCode(); /** Number of CPUS, to place bound on table size */ static final int NCPU = Runtime.getRuntime().availableProcessors(); /** * Table of cells. When non-null, size is a power of 2. */ transient volatile Cell[] cells; /** * Base value, used mainly when there is no contention, but also as * a fallback during table initialization races. Updated via CAS. */ transient volatile long base; /** * Spinlock (locked via CAS) used when resizing and/or creating Cells. */ transient volatile int busy; /** * Package-private default constructor */ Striped64() { } /** * CASes the base field. */ final boolean casBase(long cmp, long val) { return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); } /** * CASes the busy field from 0 to 1 to acquire lock. */ final boolean casBusy() { return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); } /** * Computes the function of current and new value. Subclasses * should open-code this update function for most uses, but the * virtualized form is needed within retryUpdate. * * @param currentValue the current value (of either base or a cell) * @param newValue the argument from a user update call * @return result of the update function */ abstract long fn(long currentValue, long newValue); /** * Handles cases of updates involving initialization, resizing, * creating new Cells, and/or contention. See above for * explanation. This method suffers the usual non-modularity * problems of optimistic retry code, relying on rechecked sets of * reads. 
* * @param x the value * @param hc the hash code holder * @param wasUncontended false if CAS failed before call */ final void retryUpdate(long x, HashCode hc, boolean wasUncontended) { int h = hc.code; boolean collide = false; // True if last slot nonempty for (;;) { Cell[] as; Cell a; int n; long v; if ((as = cells) != null && (n = as.length) > 0) { if ((a = as[(n - 1) & h]) == null) { if (busy == 0) { // Try to attach new Cell Cell r = new Cell(x); // Optimistically create if (busy == 0 && casBusy()) { boolean created = false; try { // Recheck under lock Cell[] rs; int m, j; if ((rs = cells) != null && (m = rs.length) > 0 && rs[j = (m - 1) & h] == null) { rs[j] = r; created = true; } } finally { busy = 0; } if (created) break; continue; // Slot is now non-empty } } collide = false; } else if (!wasUncontended) // CAS already known to fail wasUncontended = true; // Continue after rehash else if (a.cas(v = a.value, fn(v, x))) break; else if (n >= NCPU || cells != as) collide = false; // At max size or stale else if (!collide) collide = true; else if (busy == 0 && casBusy()) { try { if (cells == as) { // Expand table unless stale Cell[] rs = new Cell[n << 1]; for (int i = 0; i < n; ++i) rs[i] = as[i]; cells = rs; } } finally { busy = 0; } collide = false; continue; // Retry with expanded table } h ^= h << 13; // Rehash h ^= h >>> 17; h ^= h << 5; } else if (busy == 0 && cells == as && casBusy()) { boolean init = false; try { // Initialize table if (cells == as) { Cell[] rs = new Cell[2]; rs[h & 1] = new Cell(x); cells = rs; init = true; } } finally { busy = 0; } if (init) break; } else if (casBase(v = base, fn(v, x))) break; // Fall back on using base } hc.code = h; // Record index for next time } /** * Sets base and all cells to the given value. */ final void internalReset(long initialValue) { Cell[] as = cells; base = initialValue; if (as != null) { int n = as.length; for (int i = 0; i < n; ++i) { Cell a = as[i]; if (a != null) a.value = initialValue; } } } // Unsafe mechanics private static final sun.misc.Unsafe UNSAFE; private static final long baseOffset; private static final long busyOffset; static { try { UNSAFE = getUnsafe(); Class sk = Striped64.class; baseOffset = UNSAFE.objectFieldOffset (sk.getDeclaredField("base")); busyOffset = UNSAFE.objectFieldOffset (sk.getDeclaredField("busy")); } catch (Exception e) { throw new Error(e); } } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. 
* * @return a sun.misc.Unsafe */ private static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException se) { try { return java.security.AccessController.doPrivileged (new java.security .PrivilegedExceptionAction() { public sun.misc.Unsafe run() throws Exception { java.lang.reflect.Field f = sun.misc .Unsafe.class.getDeclaredField("theUnsafe"); f.setAccessible(true); return (sun.misc.Unsafe) f.get(null); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } } } thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMap.java0000644000004100000410000000226612530443654027246 0ustar www-datawww-datapackage org.jruby.ext.thread_safe.jsr166e; import java.util.Map; import java.util.Set; public interface ConcurrentHashMap { /** Interface describing a function of one argument */ public interface Fun { T apply(A a); } /** Interface describing a function of two arguments */ public interface BiFun { T apply(A a, B b); } public V get(K key); public V put(K key, V value); public V putIfAbsent(K key, V value); public V computeIfAbsent(K key, Fun mf); public V computeIfPresent(K key, BiFun mf); public V compute(K key, BiFun mf); public V merge(K key, V value, BiFun mf); public boolean replace(K key, V oldVal, V newVal); public V replace(K key, V value); public boolean containsKey(K key); public boolean remove(Object key, Object value); public V remove(K key); public void clear(); public Set> entrySet(); public int size(); public V getValueOrDefault(Object key, V defaultValue); public boolean containsValue(V value); public K findKey(V value); } thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/LongAdder.java0000644000004100000410000001335112530443654025516 0ustar www-datawww-data/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ // This is based on 1.9 version. package org.jruby.ext.thread_safe.jsr166e; import java.util.concurrent.atomic.AtomicLong; import java.io.IOException; import java.io.Serializable; import java.io.ObjectInputStream; /** * One or more variables that together maintain an initially zero * {@code long} sum. When updates (method {@link #add}) are contended * across threads, the set of variables may grow dynamically to reduce * contention. Method {@link #sum} (or, equivalently, {@link * #longValue}) returns the current total combined across the * variables maintaining the sum. * *
This class is usually preferable to {@link AtomicLong} when * multiple threads update a common sum that is used for purposes such * as collecting statistics, not for fine-grained synchronization * control. Under low update contention, the two classes have similar * characteristics. But under high contention, expected throughput of * this class is significantly higher, at the expense of higher space * consumption. * *
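 * <p>A hypothetical usage sketch (the names are assumptions):
 * <pre> {@code
 * final LongAdder hits = new LongAdder();
 * // many writer threads:
 * hits.increment();
 * // an occasional reader thread:
 * long total = hits.sum(); // moment-in-time total, not an atomic snapshot
 * }</pre>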
This class extends {@link Number}, but does not define * methods such as {@code hashCode} and {@code compareTo} because * instances are expected to be mutated, and so are not useful as * collection keys. * *
jsr166e note: This class is targeted to be placed in * java.util.concurrent.atomic. * * @since 1.8 * @author Doug Lea */ public class LongAdder extends Striped64 implements Serializable { private static final long serialVersionUID = 7249069246863182397L; /** * Version of plus for use in retryUpdate */ final long fn(long v, long x) { return v + x; } /** * Creates a new adder with initial sum of zero. */ public LongAdder() { } /** * Adds the given value. * * @param x the value to add */ public void add(long x) { Cell[] as; long b, v; HashCode hc; Cell a; int n; if ((as = cells) != null || !casBase(b = base, b + x)) { boolean uncontended = true; int h = (hc = threadHashCode.get()).code; if (as == null || (n = as.length) < 1 || (a = as[(n - 1) & h]) == null || !(uncontended = a.cas(v = a.value, v + x))) retryUpdate(x, hc, uncontended); } } /** * Equivalent to {@code add(1)}. */ public void increment() { add(1L); } /** * Equivalent to {@code add(-1)}. */ public void decrement() { add(-1L); } /** * Returns the current sum. The returned value is NOT an * atomic snapshot: Invocation in the absence of concurrent * updates returns an accurate result, but concurrent updates that * occur while the sum is being calculated might not be * incorporated. * * @return the sum */ public long sum() { long sum = base; Cell[] as = cells; if (as != null) { int n = as.length; for (int i = 0; i < n; ++i) { Cell a = as[i]; if (a != null) sum += a.value; } } return sum; } /** * Resets variables maintaining the sum to zero. This method may * be a useful alternative to creating a new adder, but is only * effective if there are no concurrent updates. Because this * method is intrinsically racy, it should only be used when it is * known that no threads are concurrently updating. */ public void reset() { internalReset(0L); } /** * Equivalent in effect to {@link #sum} followed by {@link * #reset}. This method may apply for example during quiescent * points between multithreaded computations. If there are * updates concurrent with this method, the returned value is * not guaranteed to be the final value occurring before * the reset. * * @return the sum */ public long sumThenReset() { long sum = base; Cell[] as = cells; base = 0L; if (as != null) { int n = as.length; for (int i = 0; i < n; ++i) { Cell a = as[i]; if (a != null) { sum += a.value; a.value = 0L; } } } return sum; } /** * Returns the String representation of the {@link #sum}. * @return the String representation of the {@link #sum} */ public String toString() { return Long.toString(sum()); } /** * Equivalent to {@link #sum}. * * @return the sum */ public long longValue() { return sum(); } /** * Returns the {@link #sum} as an {@code int} after a narrowing * primitive conversion. */ public int intValue() { return (int)sum(); } /** * Returns the {@link #sum} as a {@code float} * after a widening primitive conversion. */ public float floatValue() { return (float)sum(); } /** * Returns the {@link #sum} as a {@code double} after a widening * primitive conversion. 
*/ public double doubleValue() { return (double)sum(); } private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException { s.defaultWriteObject(); s.writeLong(sum()); } private void readObject(ObjectInputStream s) throws IOException, ClassNotFoundException { s.defaultReadObject(); busy = 0; cells = null; base = s.readLong(); } } thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/jsr166e/ConcurrentHashMapV8.java0000644000004100000410000046711012530443654027467 0ustar www-datawww-data/* * Written by Doug Lea with assistance from members of JCP JSR-166 * Expert Group and released to the public domain, as explained at * http://creativecommons.org/publicdomain/zero/1.0/ */ // This is based on the 1.79 version. package org.jruby.ext.thread_safe.jsr166e; import org.jruby.RubyClass; import org.jruby.RubyNumeric; import org.jruby.RubyObject; import org.jruby.exceptions.RaiseException; import org.jruby.ext.thread_safe.jsr166y.ThreadLocalRandom; import org.jruby.runtime.ThreadContext; import org.jruby.runtime.builtin.IRubyObject; import java.util.Arrays; import java.util.Map; import java.util.Set; import java.util.Collection; import java.util.Hashtable; import java.util.HashMap; import java.util.Iterator; import java.util.Enumeration; import java.util.ConcurrentModificationException; import java.util.NoSuchElementException; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.AbstractQueuedSynchronizer; import java.io.Serializable; /** * A hash table supporting full concurrency of retrievals and * high expected concurrency for updates. This class obeys the * same functional specification as {@link java.util.Hashtable}, and * includes versions of methods corresponding to each method of * {@code Hashtable}. However, even though all operations are * thread-safe, retrieval operations do not entail locking, * and there is not any support for locking the entire table * in a way that prevents all access. This class is fully * interoperable with {@code Hashtable} in programs that rely on its * thread safety but not on its synchronization details. * *
Retrieval operations (including {@code get}) generally do not * block, so may overlap with update operations (including {@code put} * and {@code remove}). Retrievals reflect the results of the most * recently completed update operations holding upon their * onset. (More formally, an update operation for a given key bears a * happens-before relation with any (non-null) retrieval for * that key reporting the updated value.) For aggregate operations * such as {@code putAll} and {@code clear}, concurrent retrievals may * reflect insertion or removal of only some entries. Similarly, * Iterators and Enumerations return elements reflecting the state of * the hash table at some point at or since the creation of the * iterator/enumeration. They do not throw {@link * ConcurrentModificationException}. However, iterators are designed * to be used by only one thread at a time. Bear in mind that the * results of aggregate status methods including {@code size}, {@code * isEmpty}, and {@code containsValue} are typically useful only when * a map is not undergoing concurrent updates in other threads. * Otherwise the results of these methods reflect transient states * that may be adequate for monitoring or estimation purposes, but not * for program control. * *
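 * <p>A hypothetical visibility sketch (the {@code Config} type and the
 * map are assumptions):
 * <pre> {@code
 * // given a shared ConcurrentHashMapV8<String, Config> map
 * // writer thread: construct fully, then publish
 * map.put("config", new Config());
 * // reader thread:
 * Config c = map.get("config");
 * if (c != null) {
 *   // a non-null result happens-after the put that stored it,
 *   // so all of c's fields are fully visible here
 * }
 * }</pre>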
The table is dynamically expanded when there are too many * collisions (i.e., keys that have distinct hash codes but fall into * the same slot modulo the table size), with the expected average * effect of maintaining roughly two bins per mapping (corresponding * to a 0.75 load factor threshold for resizing). There may be much * variance around this average as mappings are added and removed, but * overall, this maintains a commonly accepted time/space tradeoff for * hash tables. However, resizing this or any other kind of hash * table may be a relatively slow operation. When possible, it is a * good idea to provide a size estimate as an optional {@code * initialCapacity} constructor argument. An additional optional * {@code loadFactor} constructor argument provides a further means of * customizing initial table capacity by specifying the table density * to be used in calculating the amount of space to allocate for the * given number of elements. Also, for compatibility with previous * versions of this class, constructors may optionally specify an * expected {@code concurrencyLevel} as an additional hint for * internal sizing. Note that using many keys with exactly the same * {@code hashCode()} is a sure way to slow down performance of any * hash table. * *
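 * <p>For example (a sketch; the size estimate is an assumption):
 * <pre> {@code
 * // presize for roughly 100000 mappings to limit incremental resizing
 * ConcurrentHashMapV8<String, Long> m =
 *     new ConcurrentHashMapV8<String, Long>(100000);
 * }</pre>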
A {@link Set} projection of a ConcurrentHashMapV8 may be created * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed * (using {@link #keySet(Object)} when only keys are of interest, and the * mapped values are (perhaps transiently) not used or all take the * same mapping value. * *
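 * <p>A hypothetical sketch of both idioms (the element types, and the
 * static {@code newKeySet} factory referenced above, are assumptions
 * here):
 * <pre> {@code
 * // a concurrent set backed by a fresh map
 * Set<String> seen = ConcurrentHashMapV8.<String>newKeySet();
 * seen.add("x");
 * // a key view of an existing ConcurrentHashMapV8<String, Boolean> m,
 * // adding under a fixed mapped value
 * Set<String> keys = m.keySet(Boolean.TRUE);
 * keys.add("y"); // behaves like m.putIfAbsent("y", Boolean.TRUE)
 * }</pre>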
A ConcurrentHashMapV8 can be used as scalable frequency map (a * form of histogram or multiset) by using {@link LongAdder} values * and initializing via {@link #computeIfAbsent}. For example, to add * a count to a {@code ConcurrentHashMapV8 freqs}, you * can use {@code freqs.computeIfAbsent(k -> new * LongAdder()).increment();} * *
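 * <p>Because the nested functional interfaces predate lambdas, an
 * equivalent anonymous-class sketch (the {@code freqs} and {@code word}
 * names are assumptions) is:
 * <pre> {@code
 * ConcurrentHashMapV8<String, LongAdder> freqs =
 *     new ConcurrentHashMapV8<String, LongAdder>();
 * freqs.computeIfAbsent(word, new ConcurrentHashMap.Fun<String, LongAdder>() {
 *   public LongAdder apply(String k) { return new LongAdder(); }
 * }).increment();
 * }</pre>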
This class and its views and iterators implement all of the * optional methods of the {@link Map} and {@link Iterator} * interfaces. * *
Like {@link Hashtable} but unlike {@link HashMap}, this class * does not allow {@code null} to be used as a key or value. * *
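 * <p>A brief sketch of why this matters (the names are assumptions):
 * <pre> {@code
 * // given a ConcurrentHashMapV8<String, Long> m
 * Long v = m.get("absent");
 * if (v == null) {
 *   // unambiguously "no mapping": a null value can never be stored
 * }
 * }</pre>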
ConcurrentHashMapV8s support parallel operations using the {@link * ForkJoinPool#commonPool}. (Tasks that may be used in other contexts * are available in class {@link ForkJoinTasks}). These operations are * designed to be safely, and often sensibly, applied even with maps * that are being concurrently updated by other threads; for example, * when computing a snapshot summary of the values in a shared * registry. There are three kinds of operation, each with four * forms, accepting functions with Keys, Values, Entries, and (Key, * Value) arguments and/or return values. (The first three forms are * also available via the {@link #keySet()}, {@link #values()} and * {@link #entrySet()} views). Because the elements of a * ConcurrentHashMapV8 are not ordered in any particular way, and may be * processed in different orders in different parallel executions, the * correctness of supplied functions should not depend on any * ordering, or on any other objects or values that may transiently * change while computation is in progress; and except for forEach * actions, should ideally be side-effect-free. * *
The concurrency properties of bulk operations follow * from those of ConcurrentHashMapV8: Any non-null result returned * from {@code get(key)} and related access methods bears a * happens-before relation with the associated insertion or * update. The result of any bulk operation reflects the * composition of these per-element relations (but is not * necessarily atomic with respect to the map as a whole unless it * is somehow known to be quiescent). Conversely, because keys * and values in the map are never null, null serves as a reliable * atomic indicator of the current lack of any result. To * maintain this property, null serves as an implicit basis for * all non-scalar reduction operations. For the double, long, and * int versions, the basis should be one that, when combined with * any other value, returns that other value (more formally, it * should be the identity element for the reduction). Most common * reductions have these properties; for example, computing a sum * with basis 0 or a minimum with basis MAX_VALUE. * *
Search and transformation functions provided as arguments * should similarly return null to indicate the lack of any result * (in which case it is not used). In the case of mapped * reductions, this also enables transformations to serve as * filters, returning null (or, in the case of primitive * specializations, the identity basis) if the element should not * be combined. You can create compound transformations and * filterings by composing them yourself under this "null means * there is nothing there now" rule before using them in search or * reduce operations. * *
Methods accepting and/or returning Entry arguments maintain * key-value associations. They may be useful for example when * finding the key for the greatest value. Note that "plain" Entry * arguments can be supplied using {@code new * AbstractMap.SimpleEntry(k,v)}. * *
Bulk operations may complete abruptly, throwing an * exception encountered in the application of a supplied * function. Bear in mind when handling such exceptions that other * concurrently executing functions could also have thrown * exceptions, or would have done so if the first exception had * not occurred. * *
Parallel speedups for bulk operations compared to sequential * processing are common but not guaranteed. Operations involving * brief functions on small maps may execute more slowly than * sequential loops if the underlying work to parallelize the * computation is more expensive than the computation itself. * Similarly, parallelization may not lead to much actual parallelism * if all processors are busy performing unrelated tasks. * *
All arguments to all task methods must be non-null. * *
jsr166e note: During transition, this class * uses nested functional interfaces with different names but the * same forms as those expected for JDK8. * *
This class is a member of the * * Java Collections Framework. * * @since 1.5 * @author Doug Lea * @param the type of keys maintained by this map * @param the type of mapped values */ public class ConcurrentHashMapV8 implements ConcurrentMap, Serializable, ConcurrentHashMap { private static final long serialVersionUID = 7249069246763182397L; /** * A partitionable iterator. A Spliterator can be traversed * directly, but can also be partitioned (before traversal) by * creating another Spliterator that covers a non-overlapping * portion of the elements, and so may be amenable to parallel * execution. * *
This interface exports a subset of expected JDK8 * functionality. * *
Sample usage: Here is one (of the several) ways to compute * the sum of the values held in a map using the ForkJoin * framework. As illustrated here, Spliterators are well suited to * designs in which a task repeatedly splits off half its work * into forked subtasks until small enough to process directly, * and then joins these subtasks. Variants of this style can also * be used in completion-based designs. * *
     * <pre> {@code ConcurrentHashMapV8<String, Long> m = ...
     * // split as if have 8 * parallelism, for load balance
     * int n = m.size();
     * int p = aForkJoinPool.getParallelism() * 8;
     * int split = (n < p)? n : p;
     * long sum = aForkJoinPool.invoke(new SumValues(m.valueSpliterator(), split, null));
     * // ...
     * static class SumValues extends RecursiveTask<Long> {
     *   final Spliterator<Long> s;
     *   final int split;             // split while > 1
     *   final SumValues nextJoin;    // records forked subtasks to join
     *   SumValues(Spliterator<Long> s, int split, SumValues nextJoin) {
     *     this.s = s; this.split = split; this.nextJoin = nextJoin;
     *   }
     *   public Long compute() {
     *     long sum = 0;
     *     SumValues subtasks = null; // fork subtasks
     *     for (int sp = split >>> 1; sp > 0; sp >>>= 1)
     *       (subtasks = new SumValues(s.split(), sp, subtasks)).fork();
     *     while (s.hasNext())        // directly process remaining elements
     *       sum += s.next();
     *     for (SumValues t = subtasks; t != null; t = t.nextJoin)
     *       sum += t.join();         // collect subtask results
     *     return sum;
     *   }
     * }
     * }</pre>
*/ public static interface Spliterator extends Iterator { /** * Returns a Spliterator covering approximately half of the * elements, guaranteed not to overlap with those subsequently * returned by this Spliterator. After invoking this method, * the current Spliterator will not produce any of * the elements of the returned Spliterator, but the two * Spliterators together will produce all of the elements that * would have been produced by this Spliterator had this * method not been called. The exact number of elements * produced by the returned Spliterator is not guaranteed, and * may be zero (i.e., with {@code hasNext()} reporting {@code * false}) if this Spliterator cannot be further split. * * @return a Spliterator covering approximately half of the * elements * @throws IllegalStateException if this Spliterator has * already commenced traversing elements */ Spliterator split(); } /* * Overview: * * The primary design goal of this hash table is to maintain * concurrent readability (typically method get(), but also * iterators and related methods) while minimizing update * contention. Secondary goals are to keep space consumption about * the same or better than java.util.HashMap, and to support high * initial insertion rates on an empty table by many threads. * * Each key-value mapping is held in a Node. Because Node fields * can contain special values, they are defined using plain Object * types. Similarly in turn, all internal methods that use them * work off Object types. And similarly, so do the internal * methods of auxiliary iterator and view classes. All public * generic typed methods relay in/out of these internal methods, * supplying null-checks and casts as needed. This also allows * many of the public methods to be factored into a smaller number * of internal methods (although sadly not so for the five * variants of put-related operations). The validation-based * approach explained below leads to a lot of code sprawl because * retry-control precludes factoring into smaller methods. * * The table is lazily initialized to a power-of-two size upon the * first insertion. Each bin in the table normally contains a * list of Nodes (most often, the list has only zero or one Node). * Table accesses require volatile/atomic reads, writes, and * CASes. Because there is no other way to arrange this without * adding further indirections, we use intrinsics * (sun.misc.Unsafe) operations. The lists of nodes within bins * are always accurately traversable under volatile reads, so long * as lookups check hash code and non-nullness of value before * checking key equality. * * We use the top two bits of Node hash fields for control * purposes -- they are available anyway because of addressing * constraints. As explained further below, these top bits are * used as follows: * 00 - Normal * 01 - Locked * 11 - Locked and may have a thread waiting for lock * 10 - Node is a forwarding node * * The lower 30 bits of each Node's hash field contain a * transformation of the key's hash code, except for forwarding * nodes, for which the lower bits are zero (and so always have * hash field == MOVED). * * Insertion (via put or its variants) of the first node in an * empty bin is performed by just CASing it to the bin. This is * by far the most common case for put operations under most * key/hash distributions. Other update operations (insert, * delete, and replace) require locks. 
We do not want to waste * the space required to associate a distinct lock object with * each bin, so instead use the first node of a bin list itself as * a lock. Blocking support for these locks relies on the builtin * "synchronized" monitors. However, we also need a tryLock * construction, so we overlay these by using bits of the Node * hash field for lock control (see above), and so normally use * builtin monitors only for blocking and signalling using * wait/notifyAll constructions. See Node.tryAwaitLock. * * Using the first node of a list as a lock does not by itself * suffice though: When a node is locked, any update must first * validate that it is still the first node after locking it, and * retry if not. Because new nodes are always appended to lists, * once a node is first in a bin, it remains first until deleted * or the bin becomes invalidated (upon resizing). However, * operations that only conditionally update may inspect nodes * until the point of update. This is a converse of sorts to the * lazy locking technique described by Herlihy & Shavit. * * The main disadvantage of per-bin locks is that other update * operations on other nodes in a bin list protected by the same * lock can stall, for example when user equals() or mapping * functions take a long time. However, statistically, under * random hash codes, this is not a common problem. Ideally, the * frequency of nodes in bins follows a Poisson distribution * (http://en.wikipedia.org/wiki/Poisson_distribution) with a * parameter of about 0.5 on average, given the resizing threshold * of 0.75, although with a large variance because of resizing * granularity. Ignoring variance, the expected occurrences of * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The * first values are: * * 0: 0.60653066 * 1: 0.30326533 * 2: 0.07581633 * 3: 0.01263606 * 4: 0.00157952 * 5: 0.00015795 * 6: 0.00001316 * 7: 0.00000094 * 8: 0.00000006 * more: less than 1 in ten million * * Lock contention probability for two threads accessing distinct * elements is roughly 1 / (8 * #elements) under random hashes. * * Actual hash code distributions encountered in practice * sometimes deviate significantly from uniform randomness. This * includes the case when N > (1<<30), so some keys MUST collide. * Similarly for dumb or hostile usages in which multiple keys are * designed to have identical hash codes. Also, although we guard * against the worst effects of this (see method spread), sets of * hashes may differ only in bits that do not impact their bin * index for a given power-of-two mask. So we use a secondary * strategy that applies when the number of nodes in a bin exceeds * a threshold, and at least one of the keys implements * Comparable. These TreeBins use a balanced tree to hold nodes * (a specialized form of red-black trees), bounding search time * to O(log N). Each search step in a TreeBin is around twice as * slow as in a regular list, but given that N cannot exceed * (1<<64) (before running out of addresses) this bounds search * steps, lock hold times, etc, to reasonable constants (roughly * 100 nodes inspected per operation worst case) so long as keys * are Comparable (which is very common -- String, Long, etc). * TreeBin nodes (TreeNodes) also maintain the same "next" * traversal pointers as regular nodes, so can be traversed in * iterators in the same way. * * The table is resized when occupancy exceeds a percentage * threshold (nominally, 0.75, but see below). 
Only a single * thread performs the resize (using field "sizeCtl", to arrange * exclusion), but the table otherwise remains usable for reads * and updates. Resizing proceeds by transferring bins, one by * one, from the table to the next table. Because we are using * power-of-two expansion, the elements from each bin must either * stay at same index, or move with a power of two offset. We * eliminate unnecessary node creation by catching cases where old * nodes can be reused because their next fields won't change. On * average, only about one-sixth of them need cloning when a table * doubles. The nodes they replace will be garbage collectable as * soon as they are no longer referenced by any reader thread that * may be in the midst of concurrently traversing table. Upon * transfer, the old table bin contains only a special forwarding * node (with hash field "MOVED") that contains the next table as * its key. On encountering a forwarding node, access and update * operations restart, using the new table. * * Each bin transfer requires its bin lock. However, unlike other * cases, a transfer can skip a bin if it fails to acquire its * lock, and revisit it later (unless it is a TreeBin). Method * rebuild maintains a buffer of TRANSFER_BUFFER_SIZE bins that * have been skipped because of failure to acquire a lock, and * blocks only if none are available (i.e., only very rarely). * The transfer operation must also ensure that all accessible * bins in both the old and new table are usable by any traversal. * When there are no lock acquisition failures, this is arranged * simply by proceeding from the last bin (table.length - 1) up * towards the first. Upon seeing a forwarding node, traversals * (see class Iter) arrange to move to the new table * without revisiting nodes. However, when any node is skipped * during a transfer, all earlier table bins may have become * visible, so are initialized with a reverse-forwarding node back * to the old table until the new ones are established. (This * sometimes requires transiently locking a forwarding node, which * is possible under the above encoding.) These more expensive * mechanics trigger only when necessary. * * The traversal scheme also applies to partial traversals of * ranges of bins (via an alternate Traverser constructor) * to support partitioned aggregate operations. Also, read-only * operations give up if ever forwarded to a null table, which * provides support for shutdown-style clearing, which is also not * currently implemented. * * Lazy table initialization minimizes footprint until first use, * and also avoids resizings when the first operation is from a * putAll, constructor with map argument, or deserialization. * These cases attempt to override the initial capacity settings, * but harmlessly fail to take effect in cases of races. * * The element count is maintained using a LongAdder, which avoids * contention on updates but can encounter cache thrashing if read * too frequently during concurrent access. To avoid reading so * often, resizing is attempted either when a bin lock is * contended, or upon adding to a bin already holding two or more * nodes (checked before adding in the xIfAbsent methods, after * adding in others). Under uniform hash distributions, the * probability of this occurring at threshold is around 13%, * meaning that only about 1 in 8 puts check threshold (and after * resizing, many fewer do so). But this approximation has high * variance for small table sizes, so we check on any collision * for sizes <= 64. 
The bulk putAll operation further reduces * contention by only committing count updates upon these size * checks. * * Maintaining API and serialization compatibility with previous * versions of this class introduces several oddities. Mainly: We * leave untouched but unused constructor arguments referring to * concurrencyLevel. We accept a loadFactor constructor argument, * but apply it only to initial table capacity (which is the only * time that we can guarantee to honor it). We also declare an * unused "Segment" class that is instantiated in minimal form * only when serializing. */ /* ---------------- Constants -------------- */ /** * The largest possible table capacity. This value must be * exactly 1<<30 to stay within Java array allocation and indexing * bounds for power of two table sizes, and is further required * because the top two bits of 32bit hash fields are used for * control purposes. */ private static final int MAXIMUM_CAPACITY = 1 << 30; /** * The default initial table capacity. Must be a power of 2 * (i.e., at least 1) and at most MAXIMUM_CAPACITY. */ private static final int DEFAULT_CAPACITY = 16; /** * The largest possible (non-power of two) array size. * Needed by toArray and related methods. */ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; /** * The default concurrency level for this table. Unused but * defined for compatibility with previous versions of this class. */ private static final int DEFAULT_CONCURRENCY_LEVEL = 16; /** * The load factor for this table. Overrides of this value in * constructors affect only the initial table capacity. The * actual floating point value isn't normally used -- it is * simpler to use expressions such as {@code n - (n >>> 2)} for * the associated resizing threshold. */ private static final float LOAD_FACTOR = 0.75f; /** * The buffer size for skipped bins during transfers. The * value is arbitrary but should be large enough to avoid * most locking stalls during resizes. */ private static final int TRANSFER_BUFFER_SIZE = 32; /** * The bin count threshold for using a tree rather than list for a * bin. The value reflects the approximate break-even point for * using tree-based operations. * Note that Doug's version defaults to 8, but when dealing with * Ruby objects it is actually beneficial to avoid TreeNodes * as long as possible as it usually means going into Ruby land. */ private static final int TREE_THRESHOLD = 16; /* * Encodings for special uses of Node hash fields. See above for * explanation. */ static final int MOVED = 0x80000000; // hash field for forwarding nodes static final int LOCKED = 0x40000000; // set/tested only as a bit static final int WAITING = 0xc0000000; // both bits set/tested together static final int HASH_BITS = 0x3fffffff; // usable bits of normal node hash /* ---------------- Fields -------------- */ /** * The array of bins. Lazily initialized upon first insertion. * Size is always a power of two. Accessed directly by iterators. */ transient volatile Node[] table; /** * The counter maintaining number of elements. */ private transient final LongAdder counter; /** * Table initialization and resizing control. When negative, the * table is being initialized or resized. Otherwise, when table is * null, holds the initial table size to use upon creation, or 0 * for default. After initialization, holds the next element count * value upon which to resize the table.
*/ private transient volatile int sizeCtl; // views private transient KeySetView keySet; private transient ValuesView values; private transient EntrySetView entrySet; /** For serialization compatibility. Null unless serialized; see below */ private Segment[] segments; /* ---------------- Table element access -------------- */ /* * Volatile access methods are used for table elements as well as * elements of in-progress next table while resizing. Uses are * null checked by callers, and implicitly bounds-checked, relying * on the invariants that tab arrays have non-zero size, and all * indices are masked with (tab.length - 1) which is never * negative and always less than length. Note that, to be correct * wrt arbitrary concurrency errors by users, bounds checks must * operate on local variables, which accounts for some odd-looking * inline assignments below. */ static final Node tabAt(Node[] tab, int i) { // used by Iter return (Node)UNSAFE.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); } private static final boolean casTabAt(Node[] tab, int i, Node c, Node v) { return UNSAFE.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); } private static final void setTabAt(Node[] tab, int i, Node v) { UNSAFE.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); } /* ---------------- Nodes -------------- */ /** * Key-value entry. Nodes with a hash field of MOVED are special, * and do not contain user keys or values. Otherwise, keys are * never null, and null val fields indicate that a node is in the * process of being deleted or created. For purposes of read-only * access, a key may be read before a val, but can only be used * after checking val to be non-null. */ static class Node { volatile int hash; final Object key; volatile Object val; volatile Node next; Node(int hash, Object key, Object val, Node next) { this.hash = hash; this.key = key; this.val = val; this.next = next; } /** CompareAndSet the hash field */ final boolean casHash(int cmp, int val) { return UNSAFE.compareAndSwapInt(this, hashOffset, cmp, val); } /** The number of spins before blocking for a lock */ static final int MAX_SPINS = Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1; /** * Spins a while if LOCKED bit set and this node is the first * of its bin, and then sets WAITING bits on hash field and * blocks (once) if they are still set. It is OK for this * method to return even if lock is not available upon exit, * which enables these simple single-wait mechanics. * * The corresponding signalling operation is performed within * callers: Upon detecting that WAITING has been set when * unlocking lock (via a failed CAS from non-waiting LOCKED * state), unlockers acquire the sync lock and perform a * notifyAll. * * The initial sanity check on tab and bounds is not currently * necessary in the only usages of this method, but enables * use in other future contexts. */ final void tryAwaitLock(Node[] tab, int i) { if (tab != null && i >= 0 && i < tab.length) { // sanity check int r = ThreadLocalRandom.current().nextInt(); // randomize spins int spins = MAX_SPINS, h; while (tabAt(tab, i) == this && ((h = hash) & LOCKED) != 0) { if (spins >= 0) { r ^= r << 1; r ^= r >>> 3; r ^= r << 10; // xorshift if (r >= 0 && --spins == 0) Thread.yield(); // yield before block } else if (casHash(h, h | WAITING)) { synchronized (this) { if (tabAt(tab, i) == this && (hash & WAITING) == WAITING) { try { wait(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); } } else notifyAll(); // possibly won race vs signaller } break; } } } } // Unsafe mechanics for casHash private static final sun.misc.Unsafe UNSAFE; private static final long hashOffset; static { try { UNSAFE = getUnsafe(); Class k = Node.class; hashOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("hash")); } catch (Exception e) { throw new Error(e); } } } /* ---------------- TreeBins -------------- */ /** * Nodes for use in TreeBins */ static final class TreeNode extends Node { TreeNode parent; // red-black tree links TreeNode left; TreeNode right; TreeNode prev; // needed to unlink next upon deletion boolean red; TreeNode(int hash, Object key, Object val, Node next, TreeNode parent) { super(hash, key, val, next); this.parent = parent; } } /** * A specialized form of red-black tree for use in bins * whose size exceeds a threshold. * * TreeBins use a special form of comparison for search and * related operations (which is the main reason we cannot use * existing collections such as TreeMaps). TreeBins contain * Comparable elements, but may contain others, as well as * elements that are Comparable but not necessarily Comparable<T> * for the same T, so we cannot invoke compareTo among them.
To * handle this, the tree is ordered primarily by hash value, then * by getClass().getName() order, and then by Comparator order * among elements of the same class. On lookup at a node, if * elements are not comparable or compare as 0, both left and * right children may need to be searched in the case of tied hash * values. (This corresponds to the full list search that would be * necessary if all elements were non-Comparable and had tied * hashes.) The red-black balancing code is updated from * pre-jdk-collections * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) * based in turn on Cormen, Leiserson, and Rivest "Introduction to * Algorithms" (CLR). * * TreeBins also maintain a separate locking discipline than * regular bins. Because they are forwarded via special MOVED * nodes at bin heads (which can never change once established), * we cannot use those nodes as locks. Instead, TreeBin * extends AbstractQueuedSynchronizer to support a simple form of * read-write lock. For update operations and table validation, * the exclusive form of lock behaves in the same way as bin-head * locks. However, lookups use shared read-lock mechanics to allow * multiple readers in the absence of writers. Additionally, * these lookups do not ever block: While the lock is not * available, they proceed along the slow traversal path (via * next-pointers) until the lock becomes available or the list is * exhausted, whichever comes first. (These cases are not fast, * but maximize aggregate expected throughput.) The AQS mechanics * for doing this are straightforward. The lock state is held as * AQS getState(). Read counts are negative; the write count (1) * is positive. There are no signalling preferences among readers * and writers. Since we don't need to export full Lock API, we * just override the minimal AQS methods and use them directly. */ static final class TreeBin extends AbstractQueuedSynchronizer { private static final long serialVersionUID = 2249069246763182397L; transient TreeNode root; // root of tree transient TreeNode first; // head of next-pointer list /* AQS overrides */ public final boolean isHeldExclusively() { return getState() > 0; } public final boolean tryAcquire(int ignore) { if (compareAndSetState(0, 1)) { setExclusiveOwnerThread(Thread.currentThread()); return true; } return false; } public final boolean tryRelease(int ignore) { setExclusiveOwnerThread(null); setState(0); return true; } public final int tryAcquireShared(int ignore) { for (int c;;) { if ((c = getState()) > 0) return -1; if (compareAndSetState(c, c -1)) return 1; } } public final boolean tryReleaseShared(int ignore) { int c; do {} while (!compareAndSetState(c = getState(), c + 1)); return c == -1; } /** From CLR */ private void rotateLeft(TreeNode p) { if (p != null) { TreeNode r = p.right, pp, rl; if ((rl = p.right = r.left) != null) rl.parent = p; if ((pp = r.parent = p.parent) == null) root = r; else if (pp.left == p) pp.left = r; else pp.right = r; r.left = p; p.parent = r; } } /** From CLR */ private void rotateRight(TreeNode p) { if (p != null) { TreeNode l = p.left, pp, lr; if ((lr = p.left = l.right) != null) lr.parent = p; if ((pp = l.parent = p.parent) == null) root = l; else if (pp.right == p) pp.right = l; else pp.left = l; l.right = p; p.parent = l; } } @SuppressWarnings("unchecked") final TreeNode getTreeNode (int h, Object k, TreeNode p) { return getTreeNode(h, (RubyObject)k, p); } /** * Returns the TreeNode (or null if not found) for the given key * starting at given root. 
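*
* In this Ruby port "Comparable" means k.respondsTo("<=>"), and
* ordering goes through rubyCompare below, which treats a nil
* result (or a key whose <=> raises NoMethodError) as a tie;
* such ties fall back to class-name order and, failing that, to
* searching both subtrees.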
*/ @SuppressWarnings("unchecked") final TreeNode getTreeNode (int h, RubyObject k, TreeNode p) { RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); while (p != null) { int dir, ph; RubyObject pk; RubyClass pc; if ((ph = p.hash) == h) { if ((pk = (RubyObject)p.key) == k || k.equals(pk)) return p; if (c != (pc = (RubyClass)pk.getMetaClass()) || kNotComparable || (dir = rubyCompare(k, pk)) == 0) { dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); if (dir == 0) { // if still stuck, need to check both sides TreeNode r = null, pl, pr; // try to recurse on the right if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) return r; // try to continue iterating on the left side else if ((pl = p.left) != null && h <= pl.hash) dir = -1; else // no matching node found return null; } } } else dir = (h < ph) ? -1 : 1; p = (dir > 0) ? p.right : p.left; } return null; } int rubyCompare(RubyObject l, RubyObject r) { ThreadContext context = l.getMetaClass().getRuntime().getCurrentContext(); IRubyObject result; try { result = l.callMethod(context, "<=>", r); } catch (RaiseException e) { // handle objects "lying" about responding to <=>, ie: an Array containing non-comparable keys if (context.runtime.getNoMethodError().isInstance(e.getException())) { return 0; } throw e; } return result.isNil() ? 0 : RubyNumeric.num2int(result.convertToInteger()); } /** * Wrapper for getTreeNode used by CHM.get. Tries to obtain * read-lock to call getTreeNode, but during failure to get * lock, searches along next links. */ final Object getValue(int h, Object k) { Node r = null; int c = getState(); // Must read lock state first for (Node e = first; e != null; e = e.next) { if (c <= 0 && compareAndSetState(c, c - 1)) { try { r = getTreeNode(h, k, root); } finally { releaseShared(0); } break; } else if ((e.hash & HASH_BITS) == h && k.equals(e.key)) { r = e; break; } else c = getState(); } return r == null ? null : r.val; } @SuppressWarnings("unchecked") final TreeNode putTreeNode (int h, Object k, Object v) { return putTreeNode(h, (RubyObject)k, v); } /** * Finds or adds a node. * @return null if added */ @SuppressWarnings("unchecked") final TreeNode putTreeNode (int h, RubyObject k, Object v) { RubyClass c = k.getMetaClass(); boolean kNotComparable = !k.respondsTo("<=>"); TreeNode pp = root, p = null; int dir = 0; while (pp != null) { // find existing node or leaf to insert at int ph; RubyObject pk; RubyClass pc; p = pp; if ((ph = p.hash) == h) { if ((pk = (RubyObject)p.key) == k || k.equals(pk)) return p; if (c != (pc = pk.getMetaClass()) || kNotComparable || (dir = rubyCompare(k, pk)) == 0) { dir = (c == pc) ? 0 : c.getName().compareTo(pc.getName()); if (dir == 0) { // if still stuck, need to check both sides TreeNode r = null, pr; // try to recurse on the right if ((pr = p.right) != null && h >= pr.hash && (r = getTreeNode(h, k, pr)) != null) return r; else // continue descending down the left subtree dir = -1; } } } else dir = (h < ph) ? -1 : 1; pp = (dir > 0) ? 
p.right : p.left; } TreeNode f = first; TreeNode x = first = new TreeNode(h, (Object)k, v, f, p); if (p == null) root = x; else { // attach and rebalance; adapted from CLR TreeNode xp, xpp; if (f != null) f.prev = x; if (dir <= 0) p.left = x; else p.right = x; x.red = true; while (x != null && (xp = x.parent) != null && xp.red && (xpp = xp.parent) != null) { TreeNode xppl = xpp.left; if (xp == xppl) { TreeNode y = xpp.right; if (y != null && y.red) { y.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.right) { rotateLeft(x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { xp.red = false; if (xpp != null) { xpp.red = true; rotateRight(xpp); } } } } else { TreeNode y = xppl; if (y != null && y.red) { y.red = false; xp.red = false; xpp.red = true; x = xpp; } else { if (x == xp.left) { rotateRight(x = xp); xpp = (xp = x.parent) == null ? null : xp.parent; } if (xp != null) { xp.red = false; if (xpp != null) { xpp.red = true; rotateLeft(xpp); } } } } } TreeNode r = root; if (r != null && r.red) r.red = false; } return null; } /** * Removes the given node, that must be present before this * call. This is messier than typical red-black deletion code * because we cannot swap the contents of an interior node * with a leaf successor that is pinned by "next" pointers * that are accessible independently of lock. So instead we * swap the tree linkages. */ final void deleteTreeNode(TreeNode p) { TreeNode next = (TreeNode)p.next; // unlink traversal pointers TreeNode pred = p.prev; if (pred == null) first = next; else pred.next = next; if (next != null) next.prev = pred; TreeNode replacement; TreeNode pl = p.left; TreeNode pr = p.right; if (pl != null && pr != null) { TreeNode s = pr, sl; while ((sl = s.left) != null) // find successor s = sl; boolean c = s.red; s.red = p.red; p.red = c; // swap colors TreeNode sr = s.right; TreeNode pp = p.parent; if (s == pr) { // p was s's direct parent p.parent = s; s.right = p; } else { TreeNode sp = s.parent; if ((p.parent = sp) != null) { if (s == sp.left) sp.left = p; else sp.right = p; } if ((s.right = pr) != null) pr.parent = s; } p.left = null; if ((p.right = sr) != null) sr.parent = p; if ((s.left = pl) != null) pl.parent = s; if ((s.parent = pp) == null) root = s; else if (p == pp.left) pp.left = s; else pp.right = s; replacement = sr; } else replacement = (pl != null) ? pl : pr; TreeNode pp = p.parent; if (replacement == null) { if (pp == null) { root = null; return; } replacement = p; } else { replacement.parent = pp; if (pp == null) root = replacement; else if (p == pp.left) pp.left = replacement; else pp.right = replacement; p.left = p.right = p.parent = null; } if (!p.red) { // rebalance, from CLR TreeNode x = replacement; while (x != null) { TreeNode xp, xpl; if (x.red || (xp = x.parent) == null) { x.red = false; break; } if (x == (xpl = xp.left)) { TreeNode sib = xp.right; if (sib != null && sib.red) { sib.red = false; xp.red = true; rotateLeft(xp); sib = (xp = x.parent) == null ? null : xp.right; } if (sib == null) x = xp; else { TreeNode sl = sib.left, sr = sib.right; if ((sr == null || !sr.red) && (sl == null || !sl.red)) { sib.red = true; x = xp; } else { if (sr == null || !sr.red) { if (sl != null) sl.red = false; sib.red = true; rotateRight(sib); sib = (xp = x.parent) == null ? null : xp.right; } if (sib != null) { sib.red = (xp == null) ? 
false : xp.red; if ((sr = sib.right) != null) sr.red = false; } if (xp != null) { xp.red = false; rotateLeft(xp); } x = root; } } } else { // symmetric TreeNode sib = xpl; if (sib != null && sib.red) { sib.red = false; xp.red = true; rotateRight(xp); sib = (xp = x.parent) == null ? null : xp.left; } if (sib == null) x = xp; else { TreeNode sl = sib.left, sr = sib.right; if ((sl == null || !sl.red) && (sr == null || !sr.red)) { sib.red = true; x = xp; } else { if (sl == null || !sl.red) { if (sr != null) sr.red = false; sib.red = true; rotateLeft(sib); sib = (xp = x.parent) == null ? null : xp.left; } if (sib != null) { sib.red = (xp == null) ? false : xp.red; if ((sl = sib.left) != null) sl.red = false; } if (xp != null) { xp.red = false; rotateRight(xp); } x = root; } } } } } if (p == replacement && (pp = p.parent) != null) { if (p == pp.left) // detach pointers pp.left = null; else if (p == pp.right) pp.right = null; p.parent = null; } } } /* ---------------- Collision reduction methods -------------- */ /** * Spreads higher bits to lower, and also forces top 2 bits to 0. * Because the table uses power-of-two masking, sets of hashes * that vary only in bits above the current mask will always * collide. (Among known examples are sets of Float keys holding * consecutive whole numbers in small tables.) To counter this, * we apply a transform that spreads the impact of higher bits * downward. There is a tradeoff between speed, utility, and * quality of bit-spreading. Because many common sets of hashes * are already reasonably distributed across bits (so don't benefit * from spreading), and because we use trees to handle large sets * of collisions in bins, we don't need excessively high quality. */ private static final int spread(int h) { h ^= (h >>> 18) ^ (h >>> 12); return (h ^ (h >>> 10)) & HASH_BITS; } /** * Replaces a list bin with a tree bin. Call only when locked. * Fails to replace if the given key is non-comparable or table * is, or needs, resizing. */ private final void replaceWithTreeBin(Node[] tab, int index, Object key) { if ((key instanceof Comparable) && (tab.length >= MAXIMUM_CAPACITY || counter.sum() < (long)sizeCtl)) { TreeBin t = new TreeBin(); for (Node e = tabAt(tab, index); e != null; e = e.next) t.putTreeNode(e.hash & HASH_BITS, e.key, e.val); setTabAt(tab, index, new Node(MOVED, t, null, null)); } } /* ---------------- Internal access and update methods -------------- */ /** Implementation for get and containsKey */ private final Object internalGet(Object k) { int h = spread(k.hashCode()); retry: for (Node[] tab = table; tab != null;) { Node e, p; Object ek, ev; int eh; // locals to read fields once for (e = tabAt(tab, (tab.length - 1) & h); e != null; e = e.next) { if ((eh = e.hash) == MOVED) { if ((ek = e.key) instanceof TreeBin) // search TreeBin return ((TreeBin)ek).getValue(h, k); else { // restart with new table tab = (Node[])ek; continue retry; } } else if ((eh & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) return ev; } break; } return null; } /** * Implementation for the four public remove/replace methods: * Replaces node value with v, conditional upon match of cv if * non-null. If resulting value is null, delete. 
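*
* The four public remove/replace methods map onto (k, v, cv)
* roughly as follows:
*
*   remove(key)                      -> (key, null, null)
*   remove(key, value)               -> (key, null, value)
*   replace(key, value)              -> (key, value, null)
*   replace(key, oldValue, newValue) -> (key, newValue, oldValue)
*
* so a null v requests deletion, and a non-null cv must match the
* current value before anything is changed.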
*/ private final Object internalReplace(Object k, Object v, Object cv) { int h = spread(k.hashCode()); Object oldVal = null; for (Node[] tab = table;;) { Node f; int i, fh; Object fk; if (tab == null || (f = tabAt(tab, i = (tab.length - 1) & h)) == null) break; else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean validated = false; boolean deleted = false; t.acquire(0); try { if (tabAt(tab, i) == f) { validated = true; TreeNode p = t.getTreeNode(h, k, t.root); if (p != null) { Object pv = p.val; if (cv == null || cv == pv || cv.equals(pv)) { oldVal = pv; if ((p.val = v) == null) { deleted = true; t.deleteTreeNode(p); } } } } } finally { t.release(0); } if (validated) { if (deleted) counter.add(-1L); break; } } else tab = (Node[])fk; } else if ((fh & HASH_BITS) != h && f.next == null) // precheck break; // rules out possible existence else if ((fh & LOCKED) != 0) { checkForResize(); // try resizing if can't get lock f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { boolean validated = false; boolean deleted = false; try { if (tabAt(tab, i) == f) { validated = true; for (Node e = f, pred = null;;) { Object ek, ev; if ((e.hash & HASH_BITS) == h && ((ev = e.val) != null) && ((ek = e.key) == k || k.equals(ek))) { if (cv == null || cv == ev || cv.equals(ev)) { oldVal = ev; if ((e.val = v) == null) { deleted = true; Node en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } } break; } pred = e; if ((e = e.next) == null) break; } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (validated) { if (deleted) counter.add(-1L); break; } } } return oldVal; } /* * Internal versions of the six insertion methods, each a * little more complicated than the last. All have * the same basic structure as the first (internalPut): * 1. If table uninitialized, create * 2. If bin empty, try to CAS new node * 3. If bin stale, use new table * 4. if bin converted to TreeBin, validate and relay to TreeBin methods * 5. Lock and validate; if valid, scan and add or update * * The others interweave other checks and/or alternative actions: * * Plain put checks for and performs resize after insertion. * * putIfAbsent prescans for mapping without lock (and fails to add * if present), which also makes pre-emptive resize checks worthwhile. * * computeIfAbsent extends form used in putIfAbsent with additional * mechanics to deal with, calls, potential exceptions and null * returns from function call. * * compute uses the same function-call mechanics, but without * the prescans * * merge acts as putIfAbsent in the absent case, but invokes the * update function if present * * putAll attempts to pre-allocate enough table space * and more lazily performs count updates and checks. * * Someday when details settle down a bit more, it might be worth * some factoring to reduce sprawl. 
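*
* In outline, step 5 is the same locking dance in each of them;
* stripped of the method-specific list scanning it looks like:
*
*   if (f.casHash(fh, fh | LOCKED)) {        // lock the bin head
*     try {
*       if (tabAt(tab, i) == f) {            // revalidate after locking
*         // ... scan the list; add, update or delete ...
*       }
*     } finally {                            // unlock, waking any waiters
*       if (!f.casHash(fh | LOCKED, fh)) {
*         f.hash = fh;
*         synchronized (f) { f.notifyAll(); }
*       }
*     }
*   }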
*/ /** Implementation for put */ private final Object internalPut(Object k, Object v) { int h = spread(k.hashCode()); int count = 0; for (Node[] tab = table;;) { int i; Node f; int fh; Object fk; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node(h, k, v, null))) break; // no lock when adding to empty bin } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; Object oldVal = null; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 2; TreeNode p = t.putTreeNode(h, k, v); if (p != null) { oldVal = p.val; p.val = v; } } } finally { t.release(0); } if (count != 0) { if (oldVal != null) return oldVal; break; } } else tab = (Node[])fk; } else if ((fh & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { Object oldVal = null; try { // needed in case equals() throws if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { oldVal = ev; e.val = v; break; } Node last = e; if ((e = e.next) == null) { last.next = new Node(h, k, v, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { // unlock and signal if needed if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (oldVal != null) return oldVal; if (tab.length <= 64) count = 2; break; } } } counter.add(1L); if (count > 1) checkForResize(); return null; } /** Implementation for putIfAbsent */ private final Object internalPutIfAbsent(Object k, Object v) { int h = spread(k.hashCode()); int count = 0; for (Node[] tab = table;;) { int i; Node f; int fh; Object fk, fv; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node(h, k, v, null))) break; } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; Object oldVal = null; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 2; TreeNode p = t.putTreeNode(h, k, v); if (p != null) oldVal = p.val; } } finally { t.release(0); } if (count != 0) { if (oldVal != null) return oldVal; break; } } else tab = (Node[])fk; } else if ((fh & HASH_BITS) == h && (fv = f.val) != null && ((fk = f.key) == k || k.equals(fk))) return fv; else { Node g = f.next; if (g != null) { // at least 2 nodes -- search and maybe resize for (Node e = g;;) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) return ev; if ((e = e.next) == null) { checkForResize(); break; } } } if (((fh = f.hash) & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { Object oldVal = null; try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { oldVal = ev; break; } Node last = e; if ((e = e.next) == null) { last.next = new Node(h, k, v, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (oldVal != null) return oldVal; if (tab.length <= 64) count = 2; break; } } } } counter.add(1L); if (count > 1) checkForResize(); return null; } /** Implementation for 
computeIfAbsent */ private final Object internalComputeIfAbsent(K k, Fun mf) { int h = spread(k.hashCode()); Object val = null; int count = 0; for (Node[] tab = table;;) { Node f; int i, fh; Object fk, fv; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { Node node = new Node(fh = h | LOCKED, k, null, null); if (casTabAt(tab, i, null, node)) { count = 1; try { if ((val = mf.apply(k)) != null) node.val = val; } finally { if (val == null) setTabAt(tab, i, null); if (!node.casHash(fh, h)) { node.hash = h; synchronized (node) { node.notifyAll(); }; } } } if (count != 0) break; } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean added = false; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 1; TreeNode p = t.getTreeNode(h, k, t.root); if (p != null) val = p.val; else if ((val = mf.apply(k)) != null) { added = true; count = 2; t.putTreeNode(h, k, val); } } } finally { t.release(0); } if (count != 0) { if (!added) return val; break; } } else tab = (Node[])fk; } else if ((fh & HASH_BITS) == h && (fv = f.val) != null && ((fk = f.key) == k || k.equals(fk))) return fv; else { Node g = f.next; if (g != null) { for (Node e = g;;) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) return ev; if ((e = e.next) == null) { checkForResize(); break; } } } if (((fh = f.hash) & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (tabAt(tab, i) == f && f.casHash(fh, fh | LOCKED)) { boolean added = false; try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { val = ev; break; } Node last = e; if ((e = e.next) == null) { if ((val = mf.apply(k)) != null) { added = true; last.next = new Node(h, k, val, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); } break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (!added) return val; if (tab.length <= 64) count = 2; break; } } } } if (val != null) { counter.add(1L); if (count > 1) checkForResize(); } return val; } /** Implementation for compute */ @SuppressWarnings("unchecked") private final Object internalCompute (K k, boolean onlyIfPresent, BiFun mf) { int h = spread(k.hashCode()); Object val = null; int delta = 0; int count = 0; for (Node[] tab = table;;) { Node f; int i, fh; Object fk; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { if (onlyIfPresent) break; Node node = new Node(fh = h | LOCKED, k, null, null); if (casTabAt(tab, i, null, node)) { try { count = 1; if ((val = mf.apply(k, null)) != null) { node.val = val; delta = 1; } } finally { if (delta == 0) setTabAt(tab, i, null); if (!node.casHash(fh, h)) { node.hash = h; synchronized (node) { node.notifyAll(); }; } } } if (count != 0) break; } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 1; TreeNode p = t.getTreeNode(h, k, t.root); Object pv; if (p == null) { if (onlyIfPresent) break; pv = null; } else pv = p.val; if ((val = mf.apply(k, (V)pv)) != null) { if (p != null) p.val = val; else { count = 2; delta = 1; t.putTreeNode(h, k, val); } } else if (p != null) { delta = -1; t.deleteTreeNode(p); } } } finally { t.release(0); } if (count != 0) break; } 
else tab = (Node[])fk; } else if ((fh & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f, pred = null;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { val = mf.apply(k, (V)ev); if (val != null) e.val = val; else { delta = -1; Node en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } break; } pred = e; if ((e = e.next) == null) { if (!onlyIfPresent && (val = mf.apply(k, null)) != null) { pred.next = new Node(h, k, val, null); delta = 1; if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); } break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (tab.length <= 64) count = 2; break; } } } if (delta != 0) { counter.add((long)delta); if (count > 1) checkForResize(); } return val; } /** Implementation for merge */ @SuppressWarnings("unchecked") private final Object internalMerge (K k, V v, BiFun mf) { int h = spread(k.hashCode()); Object val = null; int delta = 0; int count = 0; for (Node[] tab = table;;) { int i; Node f; int fh; Object fk, fv; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) { if (casTabAt(tab, i, null, new Node(h, k, v, null))) { delta = 1; val = v; break; } } else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; t.acquire(0); try { if (tabAt(tab, i) == f) { count = 1; TreeNode p = t.getTreeNode(h, k, t.root); val = (p == null) ? v : mf.apply((V)p.val, v); if (val != null) { if (p != null) p.val = val; else { count = 2; delta = 1; t.putTreeNode(h, k, val); } } else if (p != null) { delta = -1; t.deleteTreeNode(p); } } } finally { t.release(0); } if (count != 0) break; } else tab = (Node[])fk; } else if ((fh & LOCKED) != 0) { checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f, pred = null;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { val = mf.apply((V)ev, v); if (val != null) e.val = val; else { delta = -1; Node en = e.next; if (pred != null) pred.next = en; else setTabAt(tab, i, en); } break; } pred = e; if ((e = e.next) == null) { val = v; pred.next = new Node(h, k, val, null); delta = 1; if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (tab.length <= 64) count = 2; break; } } } if (delta != 0) { counter.add((long)delta); if (count > 1) checkForResize(); } return val; } /** Implementation for putAll */ private final void internalPutAll(Map m) { tryPresize(m.size()); long delta = 0L; // number of uncommitted additions boolean npe = false; // to throw exception on exit for nulls try { // to clean up counts on other exceptions for (Map.Entry entry : m.entrySet()) { Object k, v; if (entry == null || (k = entry.getKey()) == null || (v = entry.getValue()) == null) { npe = true; break; } int h = spread(k.hashCode()); for (Node[] tab = table;;) { int i; Node f; int fh; Object fk; if (tab == null) tab = initTable(); else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){ if (casTabAt(tab, i, null, new Node(h, k, v, null))) { ++delta; break; } } else if ((fh = f.hash) == MOVED) { if ((fk = 
f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean validated = false; t.acquire(0); try { if (tabAt(tab, i) == f) { validated = true; TreeNode p = t.getTreeNode(h, k, t.root); if (p != null) p.val = v; else { t.putTreeNode(h, k, v); ++delta; } } } finally { t.release(0); } if (validated) break; } else tab = (Node[])fk; } else if ((fh & LOCKED) != 0) { counter.add(delta); delta = 0L; checkForResize(); f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { int count = 0; try { if (tabAt(tab, i) == f) { count = 1; for (Node e = f;; ++count) { Object ek, ev; if ((e.hash & HASH_BITS) == h && (ev = e.val) != null && ((ek = e.key) == k || k.equals(ek))) { e.val = v; break; } Node last = e; if ((e = e.next) == null) { ++delta; last.next = new Node(h, k, v, null); if (count >= TREE_THRESHOLD) replaceWithTreeBin(tab, i, k); break; } } } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (count != 0) { if (count > 1) { counter.add(delta); delta = 0L; checkForResize(); } break; } } } } } finally { if (delta != 0) counter.add(delta); } if (npe) throw new NullPointerException(); } /* ---------------- Table Initialization and Resizing -------------- */ /** * Returns a power of two table size for the given desired capacity. * See Hackers Delight, sec 3.2 */ private static final int tableSizeFor(int c) { int n = c - 1; n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; } /** * Initializes table, using the size recorded in sizeCtl. */ private final Node[] initTable() { Node[] tab; int sc; while ((tab = table) == null) { if ((sc = sizeCtl) < 0) Thread.yield(); // lost initialization race; just spin else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { try { if ((tab = table) == null) { int n = (sc > 0) ? sc : DEFAULT_CAPACITY; tab = table = new Node[n]; sc = n - (n >>> 2); } } finally { sizeCtl = sc; } break; } } return tab; } /** * If table is too small and not already resizing, creates next * table and transfers bins. Rechecks occupancy after a transfer * to see if another resize is already needed because resizings * are lagging additions. */ private final void checkForResize() { Node[] tab; int n, sc; while ((tab = table) != null && (n = tab.length) < MAXIMUM_CAPACITY && (sc = sizeCtl) >= 0 && counter.sum() >= (long)sc && UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { try { if (tab == table) { table = rebuild(tab); sc = (n << 1) - (n >>> 1); } } finally { sizeCtl = sc; } } } /** * Tries to presize table to accommodate the given number of elements. * * @param size number of elements (doesn't need to be perfectly accurate) */ private final void tryPresize(int size) { int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(size + (size >>> 1) + 1); int sc; while ((sc = sizeCtl) >= 0) { Node[] tab = table; int n; if (tab == null || (n = tab.length) == 0) { n = (sc > c) ? sc : c; if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { try { if (table == tab) { table = new Node[n]; sc = n - (n >>> 2); } } finally { sizeCtl = sc; } } } else if (c <= sc || n >= MAXIMUM_CAPACITY) break; else if (UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { try { if (table == tab) { table = rebuild(tab); sc = (n << 1) - (n >>> 1); } } finally { sizeCtl = sc; } } } } /* * Moves and/or copies the nodes in each bin to new table. See * above for explanation. 
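*
* The sweep runs from bin (n - 1) down to 0; a bin whose lock
* cannot be acquired is parked in the TRANSFER_BUFFER_SIZE-entry
* buffer and revisited once the main sweep completes, so the
* resizer only blocks in tryAwaitLock when the buffer fills up
* (or a revisited bin is still locked with nothing else left to
* do).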
* * @return the new table */ private static final Node[] rebuild(Node[] tab) { int n = tab.length; Node[] nextTab = new Node[n << 1]; Node fwd = new Node(MOVED, nextTab, null, null); int[] buffer = null; // holds bins to revisit; null until needed Node rev = null; // reverse forwarder; null until needed int nbuffered = 0; // the number of bins in buffer list int bufferIndex = 0; // buffer index of current buffered bin int bin = n - 1; // current non-buffered bin or -1 if none for (int i = bin;;) { // start upwards sweep int fh; Node f; if ((f = tabAt(tab, i)) == null) { if (bin >= 0) { // Unbuffered; no lock needed (or available) if (!casTabAt(tab, i, f, fwd)) continue; } else { // transiently use a locked forwarding node Node g = new Node(MOVED|LOCKED, nextTab, null, null); if (!casTabAt(tab, i, f, g)) continue; setTabAt(nextTab, i, null); setTabAt(nextTab, i + n, null); setTabAt(tab, i, fwd); if (!g.casHash(MOVED|LOCKED, MOVED)) { g.hash = MOVED; synchronized (g) { g.notifyAll(); } } } } else if ((fh = f.hash) == MOVED) { Object fk = f.key; if (fk instanceof TreeBin) { TreeBin t = (TreeBin)fk; boolean validated = false; t.acquire(0); try { if (tabAt(tab, i) == f) { validated = true; splitTreeBin(nextTab, i, t); setTabAt(tab, i, fwd); } } finally { t.release(0); } if (!validated) continue; } } else if ((fh & LOCKED) == 0 && f.casHash(fh, fh|LOCKED)) { boolean validated = false; try { // split to lo and hi lists; copying as needed if (tabAt(tab, i) == f) { validated = true; splitBin(nextTab, i, f); setTabAt(tab, i, fwd); } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } if (!validated) continue; } else { if (buffer == null) // initialize buffer for revisits buffer = new int[TRANSFER_BUFFER_SIZE]; if (bin < 0 && bufferIndex > 0) { int j = buffer[--bufferIndex]; buffer[bufferIndex] = i; i = j; // swap with another bin continue; } if (bin < 0 || nbuffered >= TRANSFER_BUFFER_SIZE) { f.tryAwaitLock(tab, i); continue; // no other options -- block } if (rev == null) // initialize reverse-forwarder rev = new Node(MOVED, tab, null, null); if (tabAt(tab, i) != f || (f.hash & LOCKED) == 0) continue; // recheck before adding to list buffer[nbuffered++] = i; setTabAt(nextTab, i, rev); // install place-holders setTabAt(nextTab, i + n, rev); } if (bin > 0) i = --bin; else if (buffer != null && nbuffered > 0) { bin = -1; i = buffer[bufferIndex = --nbuffered]; } else return nextTab; } } /** * Splits a normal bin with list headed by e into lo and hi parts; * installs in given table. */ private static void splitBin(Node[] nextTab, int i, Node e) { int bit = nextTab.length >>> 1; // bit to split on int runBit = e.hash & bit; Node lastRun = e, lo = null, hi = null; for (Node p = e.next; p != null; p = p.next) { int b = p.hash & bit; if (b != runBit) { runBit = b; lastRun = p; } } if (runBit == 0) lo = lastRun; else hi = lastRun; for (Node p = e; p != lastRun; p = p.next) { int ph = p.hash & HASH_BITS; Object pk = p.key, pv = p.val; if ((ph & bit) == 0) lo = new Node(ph, pk, pv, lo); else hi = new Node(ph, pk, pv, hi); } setTabAt(nextTab, i, lo); setTabAt(nextTab, i + bit, hi); } /** * Splits a tree bin into lo and hi parts; installs in given table. 
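* If either half ends up with no more than TREE_THRESHOLD >>> 1
* nodes (8, given the Ruby-tuned threshold of 16 above), it is
* rebuilt as a plain linked list instead, since so small a tree
* no longer pays for its bookkeeping.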
*/ private static void splitTreeBin(Node[] nextTab, int i, TreeBin t) { int bit = nextTab.length >>> 1; TreeBin lt = new TreeBin(); TreeBin ht = new TreeBin(); int lc = 0, hc = 0; for (Node e = t.first; e != null; e = e.next) { int h = e.hash & HASH_BITS; Object k = e.key, v = e.val; if ((h & bit) == 0) { ++lc; lt.putTreeNode(h, k, v); } else { ++hc; ht.putTreeNode(h, k, v); } } Node ln, hn; // throw away trees if too small if (lc <= (TREE_THRESHOLD >>> 1)) { ln = null; for (Node p = lt.first; p != null; p = p.next) ln = new Node(p.hash, p.key, p.val, ln); } else ln = new Node(MOVED, lt, null, null); setTabAt(nextTab, i, ln); if (hc <= (TREE_THRESHOLD >>> 1)) { hn = null; for (Node p = ht.first; p != null; p = p.next) hn = new Node(p.hash, p.key, p.val, hn); } else hn = new Node(MOVED, ht, null, null); setTabAt(nextTab, i + bit, hn); } /** * Implementation for clear. Steps through each bin, removing all * nodes. */ private final void internalClear() { long delta = 0L; // negative number of deletions int i = 0; Node[] tab = table; while (tab != null && i < tab.length) { int fh; Object fk; Node f = tabAt(tab, i); if (f == null) ++i; else if ((fh = f.hash) == MOVED) { if ((fk = f.key) instanceof TreeBin) { TreeBin t = (TreeBin)fk; t.acquire(0); try { if (tabAt(tab, i) == f) { for (Node p = t.first; p != null; p = p.next) { if (p.val != null) { // (currently always true) p.val = null; --delta; } } t.first = null; t.root = null; ++i; } } finally { t.release(0); } } else tab = (Node[])fk; } else if ((fh & LOCKED) != 0) { counter.add(delta); // opportunistically update count delta = 0L; f.tryAwaitLock(tab, i); } else if (f.casHash(fh, fh | LOCKED)) { try { if (tabAt(tab, i) == f) { for (Node e = f; e != null; e = e.next) { if (e.val != null) { // (currently always true) e.val = null; --delta; } } setTabAt(tab, i, null); ++i; } } finally { if (!f.casHash(fh | LOCKED, fh)) { f.hash = fh; synchronized (f) { f.notifyAll(); }; } } } } if (delta != 0) counter.add(delta); } /* ----------------Table Traversal -------------- */ /** * Encapsulates traversal for methods such as containsValue; also * serves as a base class for other iterators and bulk tasks. * * At each step, the iterator snapshots the key ("nextKey") and * value ("nextVal") of a valid node (i.e., one that, at point of * snapshot, has a non-null user value). Because val fields can * change (including to null, indicating deletion), field nextVal * might not be accurate at point of use, but still maintains the * weak consistency property of holding a value that was once * valid. To support iterator.remove, the nextKey field is not * updated (nulled out) when the iterator cannot advance. * * Internal traversals directly access these fields, as in: * {@code while (it.advance() != null) { process(it.nextKey); }} * * Exported iterators must track whether the iterator has advanced * (in hasNext vs next) (by setting/checking/nulling field * nextVal), and then extract key, value, or key-value pairs as * return values of next(). * * The iterator visits once each still-valid node that was * reachable upon iterator construction. It might miss some that * were added to a bin after the bin was visited, which is OK wrt * consistency guarantees. Maintaining this property in the face * of possible ongoing resizes requires a fair amount of * bookkeeping state that is difficult to optimize away amidst * volatile accesses. Even so, traversal maintains reasonable * throughput. * * Normally, iteration proceeds bin-by-bin traversing lists. 
* However, if the table has been resized, then all future steps * must traverse both the bin at the current index as well as at * (index + baseSize); and so on for further resizings. To * paranoically cope with potential sharing by users of iterators * across threads, iteration terminates if a bounds checks fails * for a table read. * * This class extends ForkJoinTask to streamline parallel * iteration in bulk operations (see BulkTask). This adds only an * int of space overhead, which is close enough to negligible in * cases where it is not needed to not worry about it. Because * ForkJoinTask is Serializable, but iterators need not be, we * need to add warning suppressions. */ @SuppressWarnings("serial") static class Traverser { final ConcurrentHashMapV8 map; Node next; // the next entry to use K nextKey; // cached key field of next V nextVal; // cached val field of next Node[] tab; // current table; updated if resized int index; // index of bin to use next int baseIndex; // current index of initial table int baseLimit; // index bound for initial table int baseSize; // initial table size /** Creates iterator for all entries in the table. */ Traverser(ConcurrentHashMapV8 map) { this.map = map; } /** Creates iterator for split() methods */ Traverser(Traverser it) { ConcurrentHashMapV8 m; Node[] t; if ((m = this.map = it.map) == null) t = null; else if ((t = it.tab) == null && // force parent tab initialization (t = it.tab = m.table) != null) it.baseLimit = it.baseSize = t.length; this.tab = t; this.baseSize = it.baseSize; it.baseLimit = this.index = this.baseIndex = ((this.baseLimit = it.baseLimit) + it.baseIndex + 1) >>> 1; } /** * Advances next; returns nextVal or null if terminated. * See above for explanation. */ final V advance() { Node e = next; V ev = null; outer: do { if (e != null) // advance past used/skipped node e = e.next; while (e == null) { // get to next non-null bin ConcurrentHashMapV8 m; Node[] t; int b, i, n; Object ek; // checks must use locals if ((t = tab) != null) n = t.length; else if ((m = map) != null && (t = tab = m.table) != null) n = baseLimit = baseSize = t.length; else break outer; if ((b = baseIndex) >= baseLimit || (i = index) < 0 || i >= n) break outer; if ((e = tabAt(t, i)) != null && e.hash == MOVED) { if ((ek = e.key) instanceof TreeBin) e = ((TreeBin)ek).first; else { tab = (Node[])ek; continue; // restarts due to null val } } // visit upper slots if present index = (i += baseSize) < n ? i : (baseIndex = b + 1); } nextKey = (K) e.key; } while ((ev = (V) e.val) == null); // skip deleted or special nodes next = e; return nextVal = ev; } public final void remove() { Object k = nextKey; if (k == null && (advance() == null || (k = nextKey) == null)) throw new IllegalStateException(); map.internalReplace(k, null, null); } public final boolean hasNext() { return nextVal != null || advance() != null; } public final boolean hasMoreElements() { return hasNext(); } public final void setRawResult(Object x) { } public R getRawResult() { return null; } public boolean exec() { return true; } } /* ---------------- Public operations -------------- */ /** * Creates a new, empty map with the default initial table size (16). */ public ConcurrentHashMapV8() { this.counter = new LongAdder(); } /** * Creates a new, empty map with an initial table size * accommodating the specified number of elements without the need * to dynamically resize. * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. 
* @throws IllegalArgumentException if the initial capacity of * elements is negative */ public ConcurrentHashMapV8(int initialCapacity) { if (initialCapacity < 0) throw new IllegalArgumentException(); int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); this.counter = new LongAdder(); this.sizeCtl = cap; } /** * Creates a new map with the same mappings as the given map. * * @param m the map */ public ConcurrentHashMapV8(Map m) { this.counter = new LongAdder(); this.sizeCtl = DEFAULT_CAPACITY; internalPutAll(m); } /** * Creates a new, empty map with an initial table size based on * the given number of elements ({@code initialCapacity}) and * initial table density ({@code loadFactor}). * * @param initialCapacity the initial capacity. The implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadFactor the load factor (table density) for * establishing the initial table size * @throws IllegalArgumentException if the initial capacity of * elements is negative or the load factor is nonpositive * * @since 1.6 */ public ConcurrentHashMapV8(int initialCapacity, float loadFactor) { this(initialCapacity, loadFactor, 1); } /** * Creates a new, empty map with an initial table size based on * the given number of elements ({@code initialCapacity}), table * density ({@code loadFactor}), and number of concurrently * updating threads ({@code concurrencyLevel}). * * @param initialCapacity the initial capacity. The implementation * performs internal sizing to accommodate this many elements, * given the specified load factor. * @param loadFactor the load factor (table density) for * establishing the initial table size * @param concurrencyLevel the estimated number of concurrently * updating threads. The implementation may use this value as * a sizing hint. * @throws IllegalArgumentException if the initial capacity is * negative or the load factor or concurrencyLevel are * nonpositive */ public ConcurrentHashMapV8(int initialCapacity, float loadFactor, int concurrencyLevel) { if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) throw new IllegalArgumentException(); if (initialCapacity < concurrencyLevel) // Use at least as many bins initialCapacity = concurrencyLevel; // as estimated threads long size = (long)(1.0 + (long)initialCapacity / loadFactor); int cap = (size >= (long)MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : tableSizeFor((int)size); this.counter = new LongAdder(); this.sizeCtl = cap; } /** * Creates a new {@link Set} backed by a ConcurrentHashMapV8 * from the given type to {@code Boolean.TRUE}. * * @return the new set */ public static KeySetView newKeySet() { return new KeySetView(new ConcurrentHashMapV8(), Boolean.TRUE); } /** * Creates a new {@link Set} backed by a ConcurrentHashMapV8 * from the given type to {@code Boolean.TRUE}. * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. * @throws IllegalArgumentException if the initial capacity of * elements is negative * @return the new set */ public static KeySetView newKeySet(int initialCapacity) { return new KeySetView(new ConcurrentHashMapV8(initialCapacity), Boolean.TRUE); } /** * {@inheritDoc} */ public boolean isEmpty() { return counter.sum() <= 0L; // ignore transient negative values } /** * {@inheritDoc} */ public int size() { long n = counter.sum(); return ((n < 0L) ? 0 : (n > (long)Integer.MAX_VALUE) ? 
Integer.MAX_VALUE : (int)n); } /** * Returns the number of mappings. This method should be used * instead of {@link #size} because a ConcurrentHashMapV8 may * contain more mappings than can be represented as an int. The * value returned is a snapshot; the actual count may differ if * there are ongoing concurrent insertions or removals. * * @return the number of mappings */ public long mappingCount() { long n = counter.sum(); return (n < 0L) ? 0L : n; // ignore transient negative values } /** * Returns the value to which the specified key is mapped, * or {@code null} if this map contains no mapping for the key. * *
<p>
More formally, if this map contains a mapping from a key * {@code k} to a value {@code v} such that {@code key.equals(k)}, * then this method returns {@code v}; otherwise it returns * {@code null}. (There can be at most one such mapping.) * * @throws NullPointerException if the specified key is null */ @SuppressWarnings("unchecked") public V get(Object key) { if (key == null) throw new NullPointerException(); return (V)internalGet(key); } /** * Returns the value to which the specified key is mapped, * or the given defaultValue if this map contains no mapping for the key. * * @param key the key * @param defaultValue the value to return if this map contains * no mapping for the given key * @return the mapping for the key, if present; else the defaultValue * @throws NullPointerException if the specified key is null */ @SuppressWarnings("unchecked") public V getValueOrDefault(Object key, V defaultValue) { if (key == null) throw new NullPointerException(); V v = (V) internalGet(key); return v == null ? defaultValue : v; } /** * Tests if the specified object is a key in this table. * * @param key possible key * @return {@code true} if and only if the specified object * is a key in this table, as determined by the * {@code equals} method; {@code false} otherwise * @throws NullPointerException if the specified key is null */ public boolean containsKey(Object key) { if (key == null) throw new NullPointerException(); return internalGet(key) != null; } /** * Returns {@code true} if this map maps one or more keys to the * specified value. Note: This method may require a full traversal * of the map, and is much slower than method {@code containsKey}. * * @param value value whose presence in this map is to be tested * @return {@code true} if this map maps one or more keys to the * specified value * @throws NullPointerException if the specified value is null */ public boolean containsValue(Object value) { if (value == null) throw new NullPointerException(); Object v; Traverser it = new Traverser(this); while ((v = it.advance()) != null) { if (v == value || value.equals(v)) return true; } return false; } public K findKey(Object value) { if (value == null) throw new NullPointerException(); Object v; Traverser it = new Traverser(this); while ((v = it.advance()) != null) { if (v == value || value.equals(v)) return it.nextKey; } return null; } /** * Legacy method testing if some key maps into the specified value * in this table. This method is identical in functionality to * {@link #containsValue}, and exists solely to ensure * full compatibility with class {@link java.util.Hashtable}, * which supported this method prior to introduction of the * Java Collections framework. * * @param value a value to search for * @return {@code true} if and only if some key maps to the * {@code value} argument in this table as * determined by the {@code equals} method; * {@code false} otherwise * @throws NullPointerException if the specified value is null */ public boolean contains(Object value) { return containsValue(value); } /** * Maps the specified key to the specified value in this table. * Neither the key nor the value can be null. * *
<p>
The value can be retrieved by calling the {@code get} method * with a key that is equal to the original key. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @return the previous value associated with {@code key}, or * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key or value is null */ @SuppressWarnings("unchecked") public V put(K key, V value) { if (key == null || value == null) throw new NullPointerException(); return (V)internalPut(key, value); } /** * {@inheritDoc} * * @return the previous value associated with the specified key, * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ @SuppressWarnings("unchecked") public V putIfAbsent(K key, V value) { if (key == null || value == null) throw new NullPointerException(); return (V)internalPutIfAbsent(key, value); } /** * Copies all of the mappings from the specified map to this one. * These mappings replace any mappings that this map had for any of the * keys currently in the specified map. * * @param m mappings to be stored in this map */ public void putAll(Map m) { internalPutAll(m); } /** * If the specified key is not already associated with a value, * computes its value using the given mappingFunction and enters * it into the map unless null. This is equivalent to *
<pre> {@code
     * if (map.containsKey(key))
     *   return map.get(key);
     * value = mappingFunction.apply(key);
     * if (value != null)
     *   map.put(key, value);
     * return value;}</pre>
* * except that the action is performed atomically. If the * function returns {@code null} no mapping is recorded. If the * function itself throws an (unchecked) exception, the exception * is rethrown to its caller, and no mapping is recorded. Some * attempted update operations on this map by other threads may be * blocked while computation is in progress, so the computation * should be short and simple, and must not attempt to update any * other mappings of this Map. The most appropriate usage is to * construct a new object serving as an initial mapped value, or * memoized result, as in: * *
*  <pre> {@code
     * map.computeIfAbsent(key, new Fun<K, V>() {
     *   public V apply(K k) { return new Value(f(k)); }});}</pre>
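     *
     * <p>As a concrete sketch (hypothetical names; {@code java.util.regex.Pattern}
     * stands in for any expensive-to-build value), a memoizing cache might be:
     *  <pre> {@code
     * ConcurrentHashMapV8<String, Pattern> cache = new ConcurrentHashMapV8<String, Pattern>();
     * // compiles each source string at most once, even under concurrent access
     * Pattern p = cache.computeIfAbsent(src, new Fun<String, Pattern>() {
     *   public Pattern apply(String s) { return Pattern.compile(s); }});}</pre>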
* * @param key key with which the specified value is to be associated * @param mappingFunction the function to compute a value * @return the current (existing or computed) value associated with * the specified key, or null if the computed value is null * @throws NullPointerException if the specified key or mappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the mappingFunction does so, * in which case the mapping is left unestablished */ @SuppressWarnings("unchecked") public V computeIfAbsent (K key, Fun mappingFunction) { if (key == null || mappingFunction == null) throw new NullPointerException(); return (V)internalComputeIfAbsent(key, mappingFunction); } /** * If the given key is present, computes a new mapping value given a key and * its current mapped value. This is equivalent to *
*  <pre> {@code
     *   if (map.containsKey(key)) {
     *     value = remappingFunction.apply(key, map.get(key));
     *     if (value != null)
     *       map.put(key, value);
     *     else
     *       map.remove(key);
     *   }
     * }</pre>
* * except that the action is performed atomically. If the * function returns {@code null}, the mapping is removed. If the * function itself throws an (unchecked) exception, the exception * is rethrown to its caller, and the current mapping is left * unchanged. Some attempted update operations on this map by * other threads may be blocked while computation is in progress, * so the computation should be short and simple, and must not * attempt to update any other mappings of this Map. For example, * to either create or append new messages to a value mapping: * * @param key key with which the specified value is to be associated * @param remappingFunction the function to compute a value * @return the new value associated with the specified key, or null if none * @throws NullPointerException if the specified key or remappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the remappingFunction does so, * in which case the mapping is unchanged */ @SuppressWarnings("unchecked") public V computeIfPresent (K key, BiFun remappingFunction) { if (key == null || remappingFunction == null) throw new NullPointerException(); return (V)internalCompute(key, true, remappingFunction); } /** * Computes a new mapping value given a key and * its current mapped value (or {@code null} if there is no current * mapping). This is equivalent to *
*  <pre> {@code
     *   value = remappingFunction.apply(key, map.get(key));
     *   if (value != null)
     *     map.put(key, value);
     *   else
     *     map.remove(key);
     * }</pre>
* * except that the action is performed atomically. If the * function returns {@code null}, the mapping is removed. If the * function itself throws an (unchecked) exception, the exception * is rethrown to its caller, and the current mapping is left * unchanged. Some attempted update operations on this map by * other threads may be blocked while computation is in progress, * so the computation should be short and simple, and must not * attempt to update any other mappings of this Map. For example, * to either create or append new messages to a value mapping: * *
*  <pre> {@code
     * Map<Key, String> map = ...;
     * final String msg = ...;
     * map.compute(key, new BiFun<Key, String, String>() {
     *   public String apply(Key k, String v) {
     *     return (v == null) ? msg : v + msg; }});}</pre>
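     *
     * <p>Or, as a hypothetical word-count sketch (relying on autoboxed
     * {@code Integer} values):
     *  <pre> {@code
     * // atomically initializes the count to 1 or increments the existing count
     * map.compute(word, new BiFun<String, Integer, Integer>() {
     *   public Integer apply(String k, Integer v) {
     *     return (v == null) ? 1 : v + 1; }});}</pre>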
* * @param key key with which the specified value is to be associated * @param remappingFunction the function to compute a value * @return the new value associated with the specified key, or null if none * @throws NullPointerException if the specified key or remappingFunction * is null * @throws IllegalStateException if the computation detectably * attempts a recursive update to this map that would * otherwise never complete * @throws RuntimeException or Error if the remappingFunction does so, * in which case the mapping is unchanged */ @SuppressWarnings("unchecked") public V compute (K key, BiFun remappingFunction) { if (key == null || remappingFunction == null) throw new NullPointerException(); return (V)internalCompute(key, false, remappingFunction); } /** * If the specified key is not already associated * with a value, associate it with the given value. * Otherwise, replace the value with the results of * the given remapping function. This is equivalent to: *
*  <pre> {@code
     *   if (!map.containsKey(key))
     *     map.put(key, value);
     *   else {
     *     newValue = remappingFunction.apply(map.get(key), value);
     *     if (newValue != null)
     *       map.put(key, newValue);
     *     else
     *       map.remove(key);
     *   }
     * }</pre>
* except that the action is performed atomically. If the * function returns {@code null}, the mapping is removed. If the * function itself throws an (unchecked) exception, the exception * is rethrown to its caller, and the current mapping is left * unchanged. Some attempted update operations on this map by * other threads may be blocked while computation is in progress, * so the computation should be short and simple, and must not * attempt to update any other mappings of this Map. */ @SuppressWarnings("unchecked") public V merge (K key, V value, BiFun remappingFunction) { if (key == null || value == null || remappingFunction == null) throw new NullPointerException(); return (V)internalMerge(key, value, remappingFunction); } /** * Removes the key (and its corresponding value) from this map. * This method does nothing if the key is not in the map. * * @param key the key that needs to be removed * @return the previous value associated with {@code key}, or * {@code null} if there was no mapping for {@code key} * @throws NullPointerException if the specified key is null */ @SuppressWarnings("unchecked") public V remove(Object key) { if (key == null) throw new NullPointerException(); return (V)internalReplace(key, null, null); } /** * {@inheritDoc} * * @throws NullPointerException if the specified key is null */ public boolean remove(Object key, Object value) { if (key == null) throw new NullPointerException(); if (value == null) return false; return internalReplace(key, null, value) != null; } /** * {@inheritDoc} * * @throws NullPointerException if any of the arguments are null */ public boolean replace(K key, V oldValue, V newValue) { if (key == null || oldValue == null || newValue == null) throw new NullPointerException(); return internalReplace(key, newValue, oldValue) != null; } /** * {@inheritDoc} * * @return the previous value associated with the specified key, * or {@code null} if there was no mapping for the key * @throws NullPointerException if the specified key or value is null */ @SuppressWarnings("unchecked") public V replace(K key, V value) { if (key == null || value == null) throw new NullPointerException(); return (V)internalReplace(key, value, null); } /** * Removes all of the mappings from this map. */ public void clear() { internalClear(); } /** * Returns a {@link Set} view of the keys contained in this map. * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. * * @return the set view */ public KeySetView keySet() { KeySetView ks = keySet; return (ks != null) ? ks : (keySet = new KeySetView(this, null)); } /** * Returns a {@link Set} view of the keys in this map, using the * given common mapped value for any additions (i.e., {@link * Collection#add} and {@link Collection#addAll}). This is of * course only appropriate if it is acceptable to use the same * value for all additions from this view. * * @param mappedValue the mapped value to use for any * additions. * @return the set view * @throws NullPointerException if the mappedValue is null */ public KeySetView keySet(V mappedValue) { if (mappedValue == null) throw new NullPointerException(); return new KeySetView(this, mappedValue); } /** * Returns a {@link Collection} view of the values contained in this map. * The collection is backed by the map, so changes to the map are * reflected in the collection, and vice-versa. */ public ValuesView values() { ValuesView vs = values; return (vs != null) ? 
vs : (values = new ValuesView(this)); } /** * Returns a {@link Set} view of the mappings contained in this map. * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. The set supports element * removal, which removes the corresponding mapping from the map, * via the {@code Iterator.remove}, {@code Set.remove}, * {@code removeAll}, {@code retainAll}, and {@code clear} * operations. It does not support the {@code add} or * {@code addAll} operations. * *
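     * <p>For example (a minimal sketch; {@code shouldEvict} is a hypothetical
     * predicate), selected mappings can be removed through the view:
     *  <pre> {@code
     * for (Iterator<Map.Entry<K,V>> it = map.entrySet().iterator(); it.hasNext();)
     *   if (shouldEvict(it.next())) it.remove();}</pre>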
* <p>
The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. */ public Set> entrySet() { EntrySetView es = entrySet; return (es != null) ? es : (entrySet = new EntrySetView(this)); } /** * Returns an enumeration of the keys in this table. * * @return an enumeration of the keys in this table * @see #keySet() */ public Enumeration keys() { return new KeyIterator(this); } /** * Returns an enumeration of the values in this table. * * @return an enumeration of the values in this table * @see #values() */ public Enumeration elements() { return new ValueIterator(this); } /** * Returns a partitionable iterator of the keys in this map. * * @return a partitionable iterator of the keys in this map */ public Spliterator keySpliterator() { return new KeyIterator(this); } /** * Returns a partitionable iterator of the values in this map. * * @return a partitionable iterator of the values in this map */ public Spliterator valueSpliterator() { return new ValueIterator(this); } /** * Returns a partitionable iterator of the entries in this map. * * @return a partitionable iterator of the entries in this map */ public Spliterator> entrySpliterator() { return new EntryIterator(this); } /** * Returns the hash code value for this {@link Map}, i.e., * the sum of, for each key-value pair in the map, * {@code key.hashCode() ^ value.hashCode()}. * * @return the hash code value for this map */ public int hashCode() { int h = 0; Traverser it = new Traverser(this); Object v; while ((v = it.advance()) != null) { h += it.nextKey.hashCode() ^ v.hashCode(); } return h; } /** * Returns a string representation of this map. The string * representation consists of a list of key-value mappings (in no * particular order) enclosed in braces ("{@code {}}"). Adjacent * mappings are separated by the characters {@code ", "} (comma * and space). Each key-value mapping is rendered as the key * followed by an equals sign ("{@code =}") followed by the * associated value. * * @return a string representation of this map */ public String toString() { Traverser it = new Traverser(this); StringBuilder sb = new StringBuilder(); sb.append('{'); Object v; if ((v = it.advance()) != null) { for (;;) { Object k = it.nextKey; sb.append(k == this ? "(this Map)" : k); sb.append('='); sb.append(v == this ? "(this Map)" : v); if ((v = it.advance()) == null) break; sb.append(',').append(' '); } } return sb.append('}').toString(); } /** * Compares the specified object with this map for equality. * Returns {@code true} if the given object is a map with the same * mappings as this map. This operation may return misleading * results if either map is concurrently modified during execution * of this method. 
* * @param o object to be compared for equality with this map * @return {@code true} if the specified object is equal to this map */ public boolean equals(Object o) { if (o != this) { if (!(o instanceof Map)) return false; Map m = (Map) o; Traverser it = new Traverser(this); Object val; while ((val = it.advance()) != null) { Object v = m.get(it.nextKey); if (v == null || (v != val && !v.equals(val))) return false; } for (Map.Entry e : m.entrySet()) { Object mk, mv, v; if ((mk = e.getKey()) == null || (mv = e.getValue()) == null || (v = internalGet(mk)) == null || (mv != v && !mv.equals(v))) return false; } } return true; } /* ----------------Iterators -------------- */ @SuppressWarnings("serial") static final class KeyIterator extends Traverser implements Spliterator, Enumeration { KeyIterator(ConcurrentHashMapV8 map) { super(map); } KeyIterator(Traverser it) { super(it); } public KeyIterator split() { if (nextKey != null) throw new IllegalStateException(); return new KeyIterator(this); } @SuppressWarnings("unchecked") public final K next() { if (nextVal == null && advance() == null) throw new NoSuchElementException(); Object k = nextKey; nextVal = null; return (K) k; } public final K nextElement() { return next(); } } @SuppressWarnings("serial") static final class ValueIterator extends Traverser implements Spliterator, Enumeration { ValueIterator(ConcurrentHashMapV8 map) { super(map); } ValueIterator(Traverser it) { super(it); } public ValueIterator split() { if (nextKey != null) throw new IllegalStateException(); return new ValueIterator(this); } @SuppressWarnings("unchecked") public final V next() { Object v; if ((v = nextVal) == null && (v = advance()) == null) throw new NoSuchElementException(); nextVal = null; return (V) v; } public final V nextElement() { return next(); } } @SuppressWarnings("serial") static final class EntryIterator extends Traverser implements Spliterator> { EntryIterator(ConcurrentHashMapV8 map) { super(map); } EntryIterator(Traverser it) { super(it); } public EntryIterator split() { if (nextKey != null) throw new IllegalStateException(); return new EntryIterator(this); } @SuppressWarnings("unchecked") public final Map.Entry next() { Object v; if ((v = nextVal) == null && (v = advance()) == null) throw new NoSuchElementException(); Object k = nextKey; nextVal = null; return new MapEntry((K)k, (V)v, map); } } /** * Exported Entry for iterators */ static final class MapEntry implements Map.Entry { final K key; // non-null V val; // non-null final ConcurrentHashMapV8 map; MapEntry(K key, V val, ConcurrentHashMapV8 map) { this.key = key; this.val = val; this.map = map; } public final K getKey() { return key; } public final V getValue() { return val; } public final int hashCode() { return key.hashCode() ^ val.hashCode(); } public final String toString(){ return key + "=" + val; } public final boolean equals(Object o) { Object k, v; Map.Entry e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry)o).getKey()) != null && (v = e.getValue()) != null && (k == key || k.equals(key)) && (v == val || v.equals(val))); } /** * Sets our entry's value and writes through to the map. The * value to return is somewhat arbitrary here. Since we do not * necessarily track asynchronous changes, the most recent * "previous" value could be different from what we return (or * could even have been removed in which case the put will * re-establish). We do not and cannot guarantee more. 
*/ public final V setValue(V value) { if (value == null) throw new NullPointerException(); V v = val; val = value; map.put(key, value); return v; } } /* ---------------- Serialization Support -------------- */ /** * Stripped-down version of helper class used in previous version, * declared for the sake of serialization compatibility */ static class Segment implements Serializable { private static final long serialVersionUID = 2249069246763182397L; final float loadFactor; Segment(float lf) { this.loadFactor = lf; } } /** * Saves the state of the {@code ConcurrentHashMapV8} instance to a * stream (i.e., serializes it). * @param s the stream * @serialData * the key (Object) and value (Object) * for each key-value mapping, followed by a null pair. * The key-value mappings are emitted in no particular order. */ @SuppressWarnings("unchecked") private void writeObject(java.io.ObjectOutputStream s) throws java.io.IOException { if (segments == null) { // for serialization compatibility segments = (Segment[]) new Segment[DEFAULT_CONCURRENCY_LEVEL]; for (int i = 0; i < segments.length; ++i) segments[i] = new Segment(LOAD_FACTOR); } s.defaultWriteObject(); Traverser it = new Traverser(this); Object v; while ((v = it.advance()) != null) { s.writeObject(it.nextKey); s.writeObject(v); } s.writeObject(null); s.writeObject(null); segments = null; // throw away } /** * Reconstitutes the instance from a stream (that is, deserializes it). * @param s the stream */ @SuppressWarnings("unchecked") private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { s.defaultReadObject(); this.segments = null; // unneeded // initialize transient final field UNSAFE.putObjectVolatile(this, counterOffset, new LongAdder()); // Create all nodes, then place in table once size is known long size = 0L; Node p = null; for (;;) { K k = (K) s.readObject(); V v = (V) s.readObject(); if (k != null && v != null) { int h = spread(k.hashCode()); p = new Node(h, k, v, p); ++size; } else break; } if (p != null) { boolean init = false; int n; if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) n = MAXIMUM_CAPACITY; else { int sz = (int)size; n = tableSizeFor(sz + (sz >>> 1) + 1); } int sc = sizeCtl; boolean collide = false; if (n > sc && UNSAFE.compareAndSwapInt(this, sizeCtlOffset, sc, -1)) { try { if (table == null) { init = true; Node[] tab = new Node[n]; int mask = n - 1; while (p != null) { int j = p.hash & mask; Node next = p.next; Node q = p.next = tabAt(tab, j); setTabAt(tab, j, p); if (!collide && q != null && q.hash == p.hash) collide = true; p = next; } table = tab; counter.add(size); sc = n - (n >>> 2); } } finally { sizeCtl = sc; } if (collide) { // rescan and convert to TreeBins Node[] tab = table; for (int i = 0; i < tab.length; ++i) { int c = 0; for (Node e = tabAt(tab, i); e != null; e = e.next) { if (++c > TREE_THRESHOLD && (e.key instanceof Comparable)) { replaceWithTreeBin(tab, i, e.key); break; } } } } } if (!init) { // Can only happen if unsafely published. 
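// Insert the remaining deserialized nodes one at a time via internalPut.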
while (p != null) { internalPut(p.key, p.val); p = p.next; } } } } // ------------------------------------------------------- // Sams /** Interface describing a void action of one argument */ public interface Action { void apply(A a); } /** Interface describing a void action of two arguments */ public interface BiAction { void apply(A a, B b); } /** Interface describing a function of one argument */ public interface Generator { T apply(); } /** Interface describing a function mapping its argument to a double */ public interface ObjectToDouble { double apply(A a); } /** Interface describing a function mapping its argument to a long */ public interface ObjectToLong { long apply(A a); } /** Interface describing a function mapping its argument to an int */ public interface ObjectToInt {int apply(A a); } /** Interface describing a function mapping two arguments to a double */ public interface ObjectByObjectToDouble { double apply(A a, B b); } /** Interface describing a function mapping two arguments to a long */ public interface ObjectByObjectToLong { long apply(A a, B b); } /** Interface describing a function mapping two arguments to an int */ public interface ObjectByObjectToInt {int apply(A a, B b); } /** Interface describing a function mapping a double to a double */ public interface DoubleToDouble { double apply(double a); } /** Interface describing a function mapping a long to a long */ public interface LongToLong { long apply(long a); } /** Interface describing a function mapping an int to an int */ public interface IntToInt { int apply(int a); } /** Interface describing a function mapping two doubles to a double */ public interface DoubleByDoubleToDouble { double apply(double a, double b); } /** Interface describing a function mapping two longs to a long */ public interface LongByLongToLong { long apply(long a, long b); } /** Interface describing a function mapping two ints to an int */ public interface IntByIntToInt { int apply(int a, int b); } /* ----------------Views -------------- */ /** * Base class for views. */ static abstract class CHMView { final ConcurrentHashMapV8 map; CHMView(ConcurrentHashMapV8 map) { this.map = map; } /** * Returns the map backing this view. * * @return the map backing this view */ public ConcurrentHashMapV8 getMap() { return map; } public final int size() { return map.size(); } public final boolean isEmpty() { return map.isEmpty(); } public final void clear() { map.clear(); } // implementations below rely on concrete classes supplying these abstract public Iterator iterator(); abstract public boolean contains(Object o); abstract public boolean remove(Object o); private static final String oomeMsg = "Required array size too large"; public final Object[] toArray() { long sz = map.mappingCount(); if (sz > (long)(MAX_ARRAY_SIZE)) throw new OutOfMemoryError(oomeMsg); int n = (int)sz; Object[] r = new Object[n]; int i = 0; Iterator it = iterator(); while (it.hasNext()) { if (i == n) { if (n >= MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) n = MAX_ARRAY_SIZE; else n += (n >>> 1) + 1; r = Arrays.copyOf(r, n); } r[i++] = it.next(); } return (i == n) ? r : Arrays.copyOf(r, i); } @SuppressWarnings("unchecked") public final T[] toArray(T[] a) { long sz = map.mappingCount(); if (sz > (long)(MAX_ARRAY_SIZE)) throw new OutOfMemoryError(oomeMsg); int m = (int)sz; T[] r = (a.length >= m) ? 
a : (T[])java.lang.reflect.Array .newInstance(a.getClass().getComponentType(), m); int n = r.length; int i = 0; Iterator it = iterator(); while (it.hasNext()) { if (i == n) { if (n >= MAX_ARRAY_SIZE) throw new OutOfMemoryError(oomeMsg); if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) n = MAX_ARRAY_SIZE; else n += (n >>> 1) + 1; r = Arrays.copyOf(r, n); } r[i++] = (T)it.next(); } if (a == r && i < n) { r[i] = null; // null-terminate return r; } return (i == n) ? r : Arrays.copyOf(r, i); } public final int hashCode() { int h = 0; for (Iterator it = iterator(); it.hasNext();) h += it.next().hashCode(); return h; } public final String toString() { StringBuilder sb = new StringBuilder(); sb.append('['); Iterator it = iterator(); if (it.hasNext()) { for (;;) { Object e = it.next(); sb.append(e == this ? "(this Collection)" : e); if (!it.hasNext()) break; sb.append(',').append(' '); } } return sb.append(']').toString(); } public final boolean containsAll(Collection c) { if (c != this) { for (Iterator it = c.iterator(); it.hasNext();) { Object e = it.next(); if (e == null || !contains(e)) return false; } } return true; } public final boolean removeAll(Collection c) { boolean modified = false; for (Iterator it = iterator(); it.hasNext();) { if (c.contains(it.next())) { it.remove(); modified = true; } } return modified; } public final boolean retainAll(Collection c) { boolean modified = false; for (Iterator it = iterator(); it.hasNext();) { if (!c.contains(it.next())) { it.remove(); modified = true; } } return modified; } } /** * A view of a ConcurrentHashMapV8 as a {@link Set} of keys, in * which additions may optionally be enabled by mapping to a * common value. This class cannot be directly instantiated. See * {@link #keySet}, {@link #keySet(Object)}, {@link #newKeySet()}, * {@link #newKeySet(int)}. */ public static class KeySetView extends CHMView implements Set, java.io.Serializable { private static final long serialVersionUID = 7249069246763182397L; private final V value; KeySetView(ConcurrentHashMapV8 map, V value) { // non-public super(map); this.value = value; } /** * Returns the default mapped value for additions, * or {@code null} if additions are not supported. * * @return the default mapped value for additions, or {@code null} * if not supported. */ public V getMappedValue() { return value; } // implement Set API public boolean contains(Object o) { return map.containsKey(o); } public boolean remove(Object o) { return map.remove(o) != null; } /** * Returns a "weakly consistent" iterator that will never * throw {@link ConcurrentModificationException}, and * guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not * guaranteed to) reflect any modifications subsequent to * construction. 
* * @return an iterator over the keys of this map */ public Iterator iterator() { return new KeyIterator(map); } public boolean add(K e) { V v; if ((v = value) == null) throw new UnsupportedOperationException(); if (e == null) throw new NullPointerException(); return map.internalPutIfAbsent(e, v) == null; } public boolean addAll(Collection c) { boolean added = false; V v; if ((v = value) == null) throw new UnsupportedOperationException(); for (K e : c) { if (e == null) throw new NullPointerException(); if (map.internalPutIfAbsent(e, v) == null) added = true; } return added; } public boolean equals(Object o) { Set c; return ((o instanceof Set) && ((c = (Set)o) == this || (containsAll(c) && c.containsAll(this)))); } } /** * A view of a ConcurrentHashMapV8 as a {@link Collection} of * values, in which additions are disabled. This class cannot be * directly instantiated. See {@link #values}, * *
* <p>
The view's {@code iterator} is a "weakly consistent" iterator * that will never throw {@link ConcurrentModificationException}, * and guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not guaranteed to) * reflect any modifications subsequent to construction. */ public static final class ValuesView extends CHMView implements Collection { ValuesView(ConcurrentHashMapV8 map) { super(map); } public final boolean contains(Object o) { return map.containsValue(o); } public final boolean remove(Object o) { if (o != null) { Iterator it = new ValueIterator(map); while (it.hasNext()) { if (o.equals(it.next())) { it.remove(); return true; } } } return false; } /** * Returns a "weakly consistent" iterator that will never * throw {@link ConcurrentModificationException}, and * guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not * guaranteed to) reflect any modifications subsequent to * construction. * * @return an iterator over the values of this map */ public final Iterator iterator() { return new ValueIterator(map); } public final boolean add(V e) { throw new UnsupportedOperationException(); } public final boolean addAll(Collection c) { throw new UnsupportedOperationException(); } } /** * A view of a ConcurrentHashMapV8 as a {@link Set} of (key, value) * entries. This class cannot be directly instantiated. See * {@link #entrySet}. */ public static final class EntrySetView extends CHMView implements Set> { EntrySetView(ConcurrentHashMapV8 map) { super(map); } public final boolean contains(Object o) { Object k, v, r; Map.Entry e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry)o).getKey()) != null && (r = map.get(k)) != null && (v = e.getValue()) != null && (v == r || v.equals(r))); } public final boolean remove(Object o) { Object k, v; Map.Entry e; return ((o instanceof Map.Entry) && (k = (e = (Map.Entry)o).getKey()) != null && (v = e.getValue()) != null && map.remove(k, v)); } /** * Returns a "weakly consistent" iterator that will never * throw {@link ConcurrentModificationException}, and * guarantees to traverse elements as they existed upon * construction of the iterator, and may (but is not * guaranteed to) reflect any modifications subsequent to * construction. 
* * @return an iterator over the entries of this map */ public final Iterator> iterator() { return new EntryIterator(map); } public final boolean add(Entry e) { K key = e.getKey(); V value = e.getValue(); if (key == null || value == null) throw new NullPointerException(); return map.internalPut(key, value) == null; } public final boolean addAll(Collection> c) { boolean added = false; for (Entry e : c) { if (add(e)) added = true; } return added; } public boolean equals(Object o) { Set c; return ((o instanceof Set) && ((c = (Set)o) == this || (containsAll(c) && c.containsAll(this)))); } } // Unsafe mechanics private static final sun.misc.Unsafe UNSAFE; private static final long counterOffset; private static final long sizeCtlOffset; private static final long ABASE; private static final int ASHIFT; static { int ss; try { UNSAFE = getUnsafe(); Class k = ConcurrentHashMapV8.class; counterOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("counter")); sizeCtlOffset = UNSAFE.objectFieldOffset (k.getDeclaredField("sizeCtl")); Class sc = Node[].class; ABASE = UNSAFE.arrayBaseOffset(sc); ss = UNSAFE.arrayIndexScale(sc); } catch (Exception e) { throw new Error(e); } if ((ss & (ss-1)) != 0) throw new Error("data type scale not a power of two"); ASHIFT = 31 - Integer.numberOfLeadingZeros(ss); } /** * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. * Replace with a simple call to Unsafe.getUnsafe when integrating * into a jdk. * * @return a sun.misc.Unsafe */ private static sun.misc.Unsafe getUnsafe() { try { return sun.misc.Unsafe.getUnsafe(); } catch (SecurityException se) { try { return java.security.AccessController.doPrivileged (new java.security .PrivilegedExceptionAction() { public sun.misc.Unsafe run() throws Exception { java.lang.reflect.Field f = sun.misc .Unsafe.class.getDeclaredField("theUnsafe"); f.setAccessible(true); return (sun.misc.Unsafe) f.get(null); }}); } catch (java.security.PrivilegedActionException e) { throw new RuntimeException("Could not initialize intrinsics", e.getCause()); } } } } thread_safe-0.3.5/ext/org/jruby/ext/thread_safe/JRubyCacheBackendLibrary.java0000644000004100000410000002422612530443654027236 0ustar www-datawww-datapackage org.jruby.ext.thread_safe; import org.jruby.*; import org.jruby.anno.JRubyClass; import org.jruby.anno.JRubyMethod; import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMap; import org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8; import org.jruby.ext.thread_safe.jsr166e.nounsafe.*; import org.jruby.runtime.Block; import org.jruby.runtime.ObjectAllocator; import org.jruby.runtime.ThreadContext; import org.jruby.runtime.builtin.IRubyObject; import org.jruby.runtime.load.Library; import java.io.IOException; import java.util.Map; import static org.jruby.runtime.Visibility.PRIVATE; /** * Native Java implementation to avoid the JI overhead. 
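 * <p>At load time the backend picks the Unsafe-based {@code ConcurrentHashMapV8}
 * when available and otherwise falls back to the {@code nounsafe} variant
 * (see {@code canUseUnsafeCHM} below).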
* * @author thedarkone */ public class JRubyCacheBackendLibrary implements Library { public void load(Ruby runtime, boolean wrap) throws IOException { RubyClass jrubyRefClass = runtime.defineClassUnder("JRubyCacheBackend", runtime.getObject(), BACKEND_ALLOCATOR, runtime.getModule("ThreadSafe")); jrubyRefClass.setAllocator(BACKEND_ALLOCATOR); jrubyRefClass.defineAnnotatedMethods(JRubyCacheBackend.class); } private static final ObjectAllocator BACKEND_ALLOCATOR = new ObjectAllocator() { public IRubyObject allocate(Ruby runtime, RubyClass klazz) { return new JRubyCacheBackend(runtime, klazz); } }; @JRubyClass(name="JRubyCacheBackend", parent="Object") public static class JRubyCacheBackend extends RubyObject { // Defaults used by the CHM static final int DEFAULT_INITIAL_CAPACITY = 16; static final float DEFAULT_LOAD_FACTOR = 0.75f; public static final boolean CAN_USE_UNSAFE_CHM = canUseUnsafeCHM(); private ConcurrentHashMap map; private static ConcurrentHashMap newCHM(int initialCapacity, float loadFactor) { if (CAN_USE_UNSAFE_CHM) { return new ConcurrentHashMapV8(initialCapacity, loadFactor); } else { return new org.jruby.ext.thread_safe.jsr166e.nounsafe.ConcurrentHashMapV8(initialCapacity, loadFactor); } } private static ConcurrentHashMap newCHM() { return newCHM(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); } private static boolean canUseUnsafeCHM() { try { new org.jruby.ext.thread_safe.jsr166e.ConcurrentHashMapV8(); // force class load and initialization return true; } catch (Throwable t) { // ensuring we really do catch everything // Doug's Unsafe setup errors always have this "Could not ini.." message if (isCausedBySecurityException(t)) { return false; } throw (t instanceof RuntimeException ? (RuntimeException) t : new RuntimeException(t)); } } private static boolean isCausedBySecurityException(Throwable t) { while (t != null) { if ((t.getMessage() != null && t.getMessage().contains("Could not initialize intrinsics")) || t instanceof SecurityException) { return true; } t = t.getCause(); } return false; } public JRubyCacheBackend(Ruby runtime, RubyClass klass) { super(runtime, klass); } @JRubyMethod public IRubyObject initialize(ThreadContext context) { map = newCHM(); return context.getRuntime().getNil(); } @JRubyMethod public IRubyObject initialize(ThreadContext context, IRubyObject options) { map = toCHM(context, options); return context.getRuntime().getNil(); } private ConcurrentHashMap toCHM(ThreadContext context, IRubyObject options) { Ruby runtime = context.getRuntime(); if (!options.isNil() && options.respondsTo("[]")) { IRubyObject rInitialCapacity = options.callMethod(context, "[]", runtime.newSymbol("initial_capacity")); IRubyObject rLoadFactor = options.callMethod(context, "[]", runtime.newSymbol("load_factor")); int initialCapacity = !rInitialCapacity.isNil() ? RubyNumeric.num2int(rInitialCapacity.convertToInteger()) : DEFAULT_INITIAL_CAPACITY; float loadFactor = !rLoadFactor.isNil() ? (float)RubyNumeric.num2dbl(rLoadFactor.convertToFloat()) : DEFAULT_LOAD_FACTOR; return newCHM(initialCapacity, loadFactor); } else { return newCHM(); } } @JRubyMethod(name = "[]", required = 1) public IRubyObject op_aref(ThreadContext context, IRubyObject key) { IRubyObject value; return ((value = map.get(key)) == null) ? 
context.getRuntime().getNil() : value; } @JRubyMethod(name = {"[]="}, required = 2) public IRubyObject op_aset(IRubyObject key, IRubyObject value) { map.put(key, value); return value; } @JRubyMethod public IRubyObject put_if_absent(IRubyObject key, IRubyObject value) { IRubyObject result = map.putIfAbsent(key, value); return result == null ? getRuntime().getNil() : result; } @JRubyMethod public IRubyObject compute_if_absent(final ThreadContext context, final IRubyObject key, final Block block) { return map.computeIfAbsent(key, new ConcurrentHashMap.Fun() { @Override public IRubyObject apply(IRubyObject key) { return block.yieldSpecific(context); } }); } @JRubyMethod public IRubyObject compute_if_present(final ThreadContext context, final IRubyObject key, final Block block) { IRubyObject result = map.computeIfPresent(key, new ConcurrentHashMap.BiFun() { @Override public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); return result.isNil() ? null : result; } }); return result == null ? context.getRuntime().getNil() : result; } @JRubyMethod public IRubyObject compute(final ThreadContext context, final IRubyObject key, final Block block) { IRubyObject result = map.compute(key, new ConcurrentHashMap.BiFun() { @Override public IRubyObject apply(IRubyObject key, IRubyObject oldValue) { IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); return result.isNil() ? null : result; } }); return result == null ? context.getRuntime().getNil() : result; } @JRubyMethod public IRubyObject merge_pair(final ThreadContext context, final IRubyObject key, final IRubyObject value, final Block block) { IRubyObject result = map.merge(key, value, new ConcurrentHashMap.BiFun() { @Override public IRubyObject apply(IRubyObject oldValue, IRubyObject newValue) { IRubyObject result = block.yieldSpecific(context, oldValue == null ? context.getRuntime().getNil() : oldValue); return result.isNil() ? null : result; } }); return result == null ? context.getRuntime().getNil() : result; } @JRubyMethod public RubyBoolean replace_pair(IRubyObject key, IRubyObject oldValue, IRubyObject newValue) { return getRuntime().newBoolean(map.replace(key, oldValue, newValue)); } @JRubyMethod(name = "key?", required = 1) public RubyBoolean has_key_p(IRubyObject key) { return map.containsKey(key) ? getRuntime().getTrue() : getRuntime().getFalse(); } @JRubyMethod public IRubyObject key(IRubyObject value) { final IRubyObject key = map.findKey(value); return key == null ? getRuntime().getNil() : key; } @JRubyMethod public IRubyObject replace_if_exists(IRubyObject key, IRubyObject value) { IRubyObject result = map.replace(key, value); return result == null ? getRuntime().getNil() : result; } @JRubyMethod public IRubyObject get_and_set(IRubyObject key, IRubyObject value) { IRubyObject result = map.put(key, value); return result == null ? getRuntime().getNil() : result; } @JRubyMethod public IRubyObject delete(IRubyObject key) { IRubyObject result = map.remove(key); return result == null ? 
getRuntime().getNil() : result; } @JRubyMethod public RubyBoolean delete_pair(IRubyObject key, IRubyObject value) { return getRuntime().newBoolean(map.remove(key, value)); } @JRubyMethod public IRubyObject clear() { map.clear(); return this; } @JRubyMethod public IRubyObject each_pair(ThreadContext context, Block block) { for (Map.Entry entry : map.entrySet()) { block.yieldSpecific(context, entry.getKey(), entry.getValue()); } return this; } @JRubyMethod public RubyFixnum size(ThreadContext context) { return context.getRuntime().newFixnum(map.size()); } @JRubyMethod public IRubyObject get_or_default(IRubyObject key, IRubyObject defaultValue) { return map.getValueOrDefault(key, defaultValue); } @JRubyMethod(visibility = PRIVATE) public JRubyCacheBackend initialize_copy(ThreadContext context, IRubyObject other) { map = newCHM(); return this; } } } thread_safe-0.3.5/README.md0000644000004100000410000000461612530443654015310 0ustar www-datawww-data# Threadsafe [![Gem Version](https://badge.fury.io/rb/thread_safe.svg)](http://badge.fury.io/rb/thread_safe) [![Build Status](https://travis-ci.org/ruby-concurrency/thread_safe.svg?branch=master)](https://travis-ci.org/ruby-concurrency/thread_safe) [![Coverage Status](https://img.shields.io/coveralls/ruby-concurrency/thread_safe/master.svg)](https://coveralls.io/r/ruby-concurrency/thread_safe) [![Code Climate](https://codeclimate.com/github/ruby-concurrency/thread_safe.svg)](https://codeclimate.com/github/ruby-concurrency/thread_safe) [![Dependency Status](https://gemnasium.com/ruby-concurrency/thread_safe.svg)](https://gemnasium.com/ruby-concurrency/thread_safe) [![License](https://img.shields.io/badge/license-apache-green.svg)](http://opensource.org/licenses/MIT) [![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/ruby-concurrency/concurrent-ruby) A collection of thread-safe versions of common core Ruby classes. ## Installation Add this line to your application's Gemfile: gem 'thread_safe' And then execute: $ bundle Or install it yourself as: $ gem install thread_safe ## Usage ```ruby require 'thread_safe' sa = ThreadSafe::Array.new # supports standard Array.new forms sh = ThreadSafe::Hash.new # supports standard Hash.new forms ``` `ThreadSafe::Cache` also exists, as a hash-like object, and should have much better performance characteristics esp. under high concurrency than `ThreadSafe::Hash`. However, `ThreadSafe::Cache` is not strictly semantically equivalent to a ruby `Hash` -- for instance, it does not necessarily retain ordering by insertion time as `Hash` does. For most uses it should do fine though, and we recommend you consider `ThreadSafe::Cache` instead of `ThreadSafe::Hash` for your concurrency-safe hash needs. It understands some options when created (depending on your ruby platform) that control some of the internals - when unsure just leave them out: ```ruby require 'thread_safe' cache = ThreadSafe::Cache.new ``` ## Contributing 1. Fork it 2. Clone it (`git clone git@github.com:you/thread_safe.git`) 3. Create your feature branch (`git checkout -b my-new-feature`) 4. Build the jar (`rake jar`) NOTE: Requires JRuby 5. Install dependencies (`bundle install`) 6. Commit your changes (`git commit -am 'Added some feature'`) 7. Push to the branch (`git push origin my-new-feature`) 8. Create new Pull Request
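
For reference, the creation options mentioned above correspond to the backend's `:initial_capacity` and `:load_factor` keys. A minimal sketch (whether they take effect depends on your ruby platform and backend):

```ruby
require 'thread_safe'

# both options are optional; omit them when unsure
cache = ThreadSafe::Cache.new(:initial_capacity => 16, :load_factor => 0.75)

cache[:a] = 1
cache.put_if_absent(:b, 2)          # returns nil if stored, else the existing value
cache.compute_if_absent(:c) { 3 }   # atomic check-then-store
```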